aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2010-05-10 12:39:28 -0400
committerPatrick McHardy <kaber@trash.net>2010-05-10 12:39:28 -0400
commit1e4b1057121bc756b91758a434b504d2010f6088 (patch)
treeb016cf2c728289c7e36d9e4e488f30ab0bd0ae6e
parent3b254c54ec46eb022cb26ee6ab37fae23f5f7d6a (diff)
parent3ee943728fff536edaf8f59faa58aaa1aa7366e3 (diff)
Merge branch 'master' of /repos/git/net-next-2.6
Conflicts: net/bridge/br_device.c net/bridge/br_forward.c Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--Documentation/RCU/NMI-RCU.txt39
-rw-r--r--Documentation/RCU/checklist.txt7
-rw-r--r--Documentation/RCU/lockdep.txt28
-rw-r--r--Documentation/RCU/whatisRCU.txt6
-rw-r--r--Documentation/input/multi-touch-protocol.txt23
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/networking/x25-iface.txt16
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile4
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/include/asm/highmem.h15
-rw-r--r--arch/arm/include/asm/kmap_types.h1
-rw-r--r--arch/arm/include/asm/ucontext.h23
-rw-r--r--arch/arm/include/asm/user.h12
-rw-r--r--arch/arm/kernel/signal.c93
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/pm_slowclock.S16
-rw-r--r--arch/arm/mach-bcmring/dma.c13
-rw-r--r--arch/arm/mach-ep93xx/gpio.c6
-rw-r--r--arch/arm/mach-mx3/Kconfig10
-rw-r--r--arch/arm/mach-mx3/clock-imx31.c5
-rw-r--r--arch/arm/mach-mx3/devices.c19
-rw-r--r--arch/arm/mach-mx3/devices.h3
-rw-r--r--arch/arm/mach-mx3/mach-armadillo5x0.c166
-rw-r--r--arch/arm/mach-mx3/mach-mx31_3ds.c116
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c1
-rw-r--r--arch/arm/mach-mx3/mx31lite-db.c2
-rw-r--r--arch/arm/mach-mx5/clock-mx51.c2
-rw-r--r--arch/arm/mach-mx5/cpu.c53
-rw-r--r--arch/arm/mach-mx5/mm.c32
-rw-r--r--arch/arm/mm/copypage-v6.c9
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/flush.c25
-rw-r--r--arch/arm/mm/highmem.c87
-rw-r--r--arch/arm/mm/mmu.c14
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31_3ds.h (renamed from arch/arm/plat-mxc/include/mach/board-mx31pdk.h)6
-rw-r--r--arch/arm/plat-mxc/include/mach/mx51.h33
-rw-r--r--arch/arm/plat-mxc/include/mach/uncompress.h4
-rw-r--r--arch/arm/vfp/vfpmodule.c31
-rw-r--r--arch/m68k/include/asm/atomic_mm.h8
-rw-r--r--arch/m68k/include/asm/sigcontext.h4
-rw-r--r--arch/mips/alchemy/devboards/db1200/setup.c40
-rw-r--r--arch/mips/ar7/platform.c3
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c231
-rw-r--r--arch/mips/bcm63xx/cpu.c5
-rw-r--r--arch/mips/bcm63xx/dev-uart.c66
-rw-r--r--arch/mips/bcm63xx/gpio.c4
-rw-r--r--arch/mips/cavium-octeon/setup.c82
-rw-r--r--arch/mips/cavium-octeon/smp.c8
-rw-r--r--arch/mips/configs/bigsur_defconfig680
-rw-r--r--arch/mips/include/asm/abi.h6
-rw-r--r--arch/mips/include/asm/elf.h5
-rw-r--r--arch/mips/include/asm/fpu_emulator.h6
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h15
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h6
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h4
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h6
-rw-r--r--arch/mips/include/asm/mmu.h5
-rw-r--r--arch/mips/include/asm/mmu_context.h2
-rw-r--r--arch/mips/include/asm/page.h6
-rw-r--r--arch/mips/include/asm/processor.h11
-rw-r--r--arch/mips/include/asm/stackframe.h19
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/include/asm/vdso.h29
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_clock.c4
-rw-r--r--arch/mips/kernel/process.c7
-rw-r--r--arch/mips/kernel/signal-common.h5
-rw-r--r--arch/mips/kernel/signal.c86
-rw-r--r--arch/mips/kernel/signal32.c55
-rw-r--r--arch/mips/kernel/signal_n32.c26
-rw-r--r--arch/mips/kernel/smtc.c2
-rw-r--r--arch/mips/kernel/syscall.c6
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/kernel/vdso.c112
-rw-r--r--arch/mips/lib/delay.c4
-rw-r--r--arch/mips/lib/libgcc.h3
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/tlbex.c22
-rw-r--r--arch/mips/mm/uasm.c23
-rw-r--r--arch/mips/pci/ops-loongson2.c10
-rw-r--r--arch/mips/sibyte/sb1250/setup.c15
-rw-r--r--arch/mips/sibyte/swarm/platform.c54
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/Kconfig.debug5
-rw-r--r--arch/sparc/include/asm/cpudata_64.h2
-rw-r--r--arch/sparc/include/asm/irqflags_64.h23
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/kernel/Makefile10
-rw-r--r--arch/sparc/kernel/ftrace.c60
-rw-r--r--arch/sparc/kernel/irq_64.c31
-rw-r--r--arch/sparc/kernel/kgdb_64.c3
-rw-r--r--arch/sparc/kernel/kstack.h19
-rw-r--r--arch/sparc/kernel/nmi.c10
-rw-r--r--arch/sparc/kernel/pci_common.c11
-rw-r--r--arch/sparc/kernel/pcr.c3
-rw-r--r--arch/sparc/kernel/rtrap_64.S12
-rw-r--r--arch/sparc/kernel/smp_64.c11
-rw-r--r--arch/sparc/kernel/time_64.c4
-rw-r--r--arch/sparc/kernel/traps_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/lib/mcount.S159
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h3
-rw-r--r--arch/x86/include/asm/lguest_hcall.h29
-rw-r--r--arch/x86/kernel/amd_iommu.c20
-rw-r--r--arch/x86/kernel/amd_iommu_init.c48
-rw-r--r--arch/x86/kernel/aperture_64.c15
-rw-r--r--arch/x86/kernel/crash.c6
-rw-r--r--arch/x86/kernel/dumpstack.h8
-rw-r--r--arch/x86/kernel/pci-gart_64.c3
-rw-r--r--arch/x86/lguest/boot.c61
-rw-r--r--arch/x86/lguest/i386_head.S2
-rw-r--r--drivers/acpi/acpica/exprep.c17
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/char/agp/intel-agp.c3
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c8
-rw-r--r--drivers/firewire/core-cdev.c23
-rw-r--r--drivers/gpu/drm/drm_stub.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c132
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c8
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c68
-rw-r--r--drivers/gpu/drm/i915/intel_display.c96
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c256
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h18
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c92
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c86
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c81
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c22
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c731
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c96
-rw-r--r--drivers/gpu/drm/radeon/atom.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c21
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/r300.c15
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r3002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r4202
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs6002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5153
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/hwmon/applesmc.c18
-rw-r--r--drivers/hwmon/it87.c32
-rw-r--r--drivers/hwmon/sht15.c13
-rw-r--r--drivers/input/input.c9
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/mouse/alps.c1
-rw-r--r--drivers/input/mouse/bcm5974.c1
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/sparse-keymap.c52
-rw-r--r--drivers/input/tablet/wacom_sys.c12
-rw-r--r--drivers/input/tablet/wacom_wac.c163
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c5
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c1
-rw-r--r--drivers/isdn/gigaset/interface.c1
-rw-r--r--drivers/isdn/gigaset/proc.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c3
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c4
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c17
-rw-r--r--drivers/lguest/lguest_device.c4
-rw-r--r--drivers/lguest/x86/core.c12
-rw-r--r--drivers/net/3c507.c1
-rw-r--r--drivers/net/3c523.c1
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig14
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arm/ks8695net.c1
-rw-r--r--drivers/net/bcm63xx_enet.c1
-rw-r--r--drivers/net/bnx2.c70
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x.h2
-rw-r--r--drivers/net/bnx2x_main.c28
-rw-r--r--drivers/net/bonding/bond_main.c99
-rw-r--r--drivers/net/caif/caif_serial.c5
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/cxgb3/ael1002.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c15
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c86
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/e100.c10
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c85
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c367
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c104
-rw-r--r--drivers/net/e1000e/82571.c24
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h7
-rw-r--r--drivers/net/e1000e/ethtool.c39
-rw-r--r--drivers/net/e1000e/ich8lan.c30
-rw-r--r--drivers/net/e1000e/netdev.c627
-rw-r--r--drivers/net/e1000e/param.c5
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c50
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/ethoc.c1
-rw-r--r--drivers/net/forcedeth.c235
-rw-r--r--drivers/net/fsl_pq_mdio.c20
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/igb/e1000_defines.h4
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h7
-rw-r--r--drivers/net/igb/igb_ethtool.c8
-rw-r--r--drivers/net/igb/igb_main.c487
-rw-r--r--drivers/net/igbvf/netdev.c76
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c14
-rw-r--r--drivers/net/ixgb/ixgb_hw.c147
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c101
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c67
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c138
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h7
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c84
-rw-r--r--drivers/net/ixgbevf/vf.c3
-rw-r--r--drivers/net/ks8842.c53
-rw-r--r--drivers/net/ks8851.c366
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/lib8390.c12
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/niu.c52
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c57
-rw-r--r--drivers/net/pcmcia/3c574_cs.c7
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c31
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/micrel.c113
-rw-r--r--drivers/net/ppp_generic.c34
-rw-r--r--drivers/net/pppoe.c8
-rw-r--r--drivers/net/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h28
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c15
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c119
-rw-r--r--drivers/net/r8169.c32
-rw-r--r--drivers/net/sb1250-mac.c331
-rw-r--r--drivers/net/sfc/efx.c135
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c20
-rw-r--r--drivers/net/sfc/falcon_boards.c13
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h5
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c32
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sky2.c134
-rw-r--r--drivers/net/sky2.h21
-rw-r--r--drivers/net/smc9194.c3
-rw-r--r--drivers/net/sunhme.c1
-rw-r--r--drivers/net/tehuti.c1
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/tun.c51
-rw-r--r--drivers/net/usb/Kconfig21
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/cdc_ether.c114
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/ipheth.c569
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/sierra_net.c1004
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wireless/Kconfig91
-rw-r--r--drivers/net/wireless/at76c50x-usb.c1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c4
-rw-r--r--drivers/net/wireless/ath/ath.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile16
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2060
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c803
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1860
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1089
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c148
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1786
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h261
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c83
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c492
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h67
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c102
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h584
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c518
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h167
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h23
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c490
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c110
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c834
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c416
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c57
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c764
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c538
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c93
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c110
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c4
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c105
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c251
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c91
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c34
-rw-r--r--drivers/net/wireless/mwl8k.c6
-rw-r--r--drivers/net/wireless/orinoco/Kconfig7
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c90
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c89
-rw-r--r--drivers/net/wireless/orinoco/main.c137
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h32
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c6
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1800
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c7
-rw-r--r--drivers/net/wireless/orinoco/wext.c189
-rw-r--r--drivers/net/wireless/p54/main.c3
-rw-r--r--drivers/net/wireless/p54/p54pci.c20
-rw-r--r--drivers/net/wireless/p54/txrx.c1
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c91
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c40
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c10
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c65
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c14
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c98
-rw-r--r--drivers/net/xilinx_emaclite.c4
-rw-r--r--drivers/pcmcia/cistpl.c9
-rw-r--r--drivers/pcmcia/db1xxx_ss.c4
-rw-r--r--drivers/pcmcia/ds.c22
-rw-r--r--drivers/pcmcia/pcmcia_resource.c10
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c16
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/serial/serial_cs.c9
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_pcicore.c29
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/ssb/pci.c14
-rw-r--r--drivers/ssb/sprom.c14
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/max63xx_wdt.c7
-rw-r--r--fs/btrfs/extent-tree.c20
-rw-r--r--fs/btrfs/volumes.c6
-rw-r--r--fs/ceph/addr.c62
-rw-r--r--fs/ceph/caps.c42
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/inode.c10
-rw-r--r--fs/ceph/messenger.c9
-rw-r--r--fs/ceph/osdmap.c180
-rw-r--r--fs/ceph/osdmap.h1
-rw-r--r--fs/ceph/rados.h6
-rw-r--r--fs/ceph/snap.c26
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/ecryptfs/crypto.c37
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h13
-rw-r--r--fs/ecryptfs/inode.c129
-rw-r--r--fs/ecryptfs/mmap.c38
-rw-r--r--fs/ecryptfs/super.c1
-rw-r--r--fs/ext2/symlink.c2
-rw-r--r--fs/ext3/symlink.c2
-rw-r--r--fs/fcntl.c66
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/write.c44
-rw-r--r--fs/nilfs2/alloc.c2
-rw-r--r--fs/nilfs2/btree.c2
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/quota/Kconfig8
-rw-r--r--fs/quota/dquot.c28
-rw-r--r--fs/udf/balloc.c10
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/udf/namei.c9
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c4
-rw-r--r--fs/xfs/xfs_log.c38
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/caif/caif_socket.h5
-rw-r--r--include/linux/dcbnl.h2
-rw-r--r--include/linux/fib_rules.h8
-rw-r--r--include/linux/filter.h3
-rw-r--r--include/linux/firewire-cdev.h78
-rw-r--r--include/linux/firewire-constants.h29
-rw-r--r--include/linux/fs.h12
-rw-r--r--include/linux/ieee80211.h1
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/if_x25.h26
-rw-r--r--include/linux/in6.h5
-rw-r--r--include/linux/input/matrix_keypad.h2
-rw-r--r--include/linux/ipv6.h16
-rw-r--r--include/linux/ks8842.h34
-rw-r--r--include/linux/net.h14
-rw-r--r--include/linux/netdevice.h27
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nl80211.h5
-rw-r--r--include/linux/notifier.h1
-rw-r--r--include/linux/pci_regs.h3
-rw-r--r--include/linux/rculist.h29
-rw-r--r--include/linux/rcupdate.h65
-rw-r--r--include/linux/rtnetlink.h6
-rw-r--r--include/linux/skbuff.h21
-rw-r--r--include/linux/spi/wl12xx.h2
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h15
-rw-r--r--include/linux/ssb/ssb_regs.h239
-rw-r--r--include/net/af_unix.h20
-rw-r--r--include/net/caif/caif_dev.h17
-rw-r--r--include/net/caif/cfcnfg.h17
-rw-r--r--include/net/caif/cfctrl.h7
-rw-r--r--include/net/caif/cfsrvl.h22
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/net/fib_rules.h2
-rw-r--r--include/net/inet_sock.h37
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/mac80211.h38
-rw-r--r--include/net/mld.h75
-rw-r--r--include/net/netns/generic.h9
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/sctp.h3
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/structs.h67
-rw-r--r--include/net/snmp.h2
-rw-r--r--include/net/sock.h132
-rw-r--r--include/net/tcp.h11
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/x25device.h1
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/rcupdate.c7
-rw-r--r--kernel/sysctl_binary.c1
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/vsprintf.c10
-rw-r--r--mm/mmap.c110
-rw-r--r--mm/rmap.c24
-rw-r--r--net/atm/common.c30
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c62
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/netdev.c6
-rw-r--r--net/bluetooth/cmtp/cmtp.h2
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c9
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/sco.c4
-rw-r--r--net/bridge/Kconfig6
-rw-r--r--net/bridge/br_device.c62
-rw-r--r--net/bridge/br_forward.c39
-rw-r--r--net/bridge/br_if.c25
-rw-r--r--net/bridge/br_multicast.c662
-rw-r--r--net/bridge/br_private.h32
-rw-r--r--net/caif/caif_dev.c25
-rw-r--r--net/caif/caif_socket.c1795
-rw-r--r--net/caif/cfcnfg.c195
-rw-r--r--net/caif/cfctrl.c95
-rw-r--r--net/caif/cfmuxl.c7
-rw-r--r--net/caif/cfsrvl.c7
-rw-r--r--net/caif/chnl_net.c130
-rw-r--r--net/core/datagram.c21
-rw-r--r--net/core/dev.c255
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c7
-rw-r--r--net/core/net_namespace.c95
-rw-r--r--net/core/netpoll.c26
-rw-r--r--net/core/rtnetlink.c19
-rw-r--r--net/core/skbuff.c29
-rw-r--r--net/core/sock.c63
-rw-r--r--net/core/stream.c22
-rw-r--r--net/dccp/output.c14
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/af_decnet.c26
-rw-r--r--net/decnet/dn_rules.c4
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ieee802154/af_ieee802154.c3
-rw-r--r--net/ipv4/af_inet.c22
-rw-r--r--net/ipv4/fib_rules.c4
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c16
-rw-r--r--net/ipv4/ipmr.c110
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c137
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/ipv4/tcp_input.c1
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_output.c9
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp.c30
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/datagram.c108
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/ip6_fib.c3
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_output.c33
-rw-r--r--net/ipv6/ipv6_sockglue.c61
-rw-r--r--net/ipv6/mcast.c135
-rw-r--r--net/ipv6/raw.c14
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c25
-rw-r--r--net/ipv6/udp.c41
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/iucv/af_iucv.c21
-rw-r--r--net/l2tp/l2tp_core.c29
-rw-r--r--net/l2tp/l2tp_eth.c29
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/mac80211/agg-rx.c18
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c30
-rw-r--r--net/mac80211/debugfs_sta.c65
-rw-r--r--net/mac80211/driver-ops.h14
-rw-r--r--net/mac80211/driver-trace.h9
-rw-r--r--net/mac80211/ibss.c27
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/key.c1
-rw-r--r--net/mac80211/main.c19
-rw-r--r--net/mac80211/mlme.c28
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/scan.c53
-rw-r--r--net/mac80211/sta_info.c30
-rw-r--r--net/mac80211/status.c7
-rw-r--r--net/mac80211/tx.c12
-rw-r--r--net/mac80211/work.c28
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netrom/af_netrom.c8
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/pn_dev.c23
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rds/rdma_transport.c2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c2
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-recvmsg.c6
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/associola.c19
-rw-r--r--net/sctp/chunk.c4
-rw-r--r--net/sctp/endpointola.c3
-rw-r--r--net/sctp/output.c27
-rw-r--r--net/sctp/outqueue.c94
-rw-r--r--net/sctp/probe.c214
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/sctp/sm_make_chunk.c118
-rw-r--r--net/sctp/sm_sideeffect.c34
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/socket.c44
-rw-r--r--net/sctp/transport.c61
-rw-r--r--net/socket.c116
-rw-r--r--net/sunrpc/svcsock.c24
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c5
-rw-r--r--net/tipc/socket.c26
-rw-r--r--net/unix/af_unix.c25
-rw-r--r--net/unix/garbage.c13
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/sme.c16
-rw-r--r--net/x25/af_x25.c9
-rw-r--r--net/x25/x25_dev.c36
-rw-r--r--net/xfrm/xfrm_hash.h3
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--security/selinux/ss/avtab.h2
-rw-r--r--sound/arm/aaci.c7
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_realtek.c170
-rw-r--r--sound/pci/hda/patch_via.c41
-rw-r--r--sound/soc/codecs/wm2000.c1
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c15
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c55
-rw-r--r--sound/soc/imx/imx-ssi.c3
-rw-r--r--sound/usb/usbmidi.c24
744 files changed, 38108 insertions, 17661 deletions
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e65d222..a8536cb88091 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler.
34 cpu = smp_processor_id(); 34 cpu = smp_processor_id();
35 ++nmi_count(cpu); 35 ++nmi_count(cpu);
36 36
37 if (!rcu_dereference(nmi_callback)(regs, cpu)) 37 if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
38 default_do_nmi(regs); 38 default_do_nmi(regs);
39 39
40 nmi_exit(); 40 nmi_exit();
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the
47default_do_nmi() function to handle a machine-specific NMI. Finally, 47default_do_nmi() function to handle a machine-specific NMI. Finally,
48preemption is restored. 48preemption is restored.
49 49
50Strictly speaking, rcu_dereference() is not needed, since this code runs 50In theory, rcu_dereference_sched() is not needed, since this code runs
51only on i386, which does not need rcu_dereference() anyway. However, 51only on i386, which in theory does not need rcu_dereference_sched()
52it is a good documentation aid, particularly for anyone attempting to 52anyway. However, in practice it is a good documentation aid, particularly
53do something similar on Alpha. 53for anyone attempting to do something similar on Alpha or on systems
54with aggressive optimizing compilers.
54 55
55Quick Quiz: Why might the rcu_dereference() be necessary on Alpha, 56Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha,
56 given that the code referenced by the pointer is read-only? 57 given that the code referenced by the pointer is read-only?
57 58
58 59
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
99 100
100Answer to Quick Quiz 101Answer to Quick Quiz
101 102
102 Why might the rcu_dereference() be necessary on Alpha, given 103 Why might the rcu_dereference_sched() be necessary on Alpha, given
103 that the code referenced by the pointer is read-only? 104 that the code referenced by the pointer is read-only?
104 105
105 Answer: The caller to set_nmi_callback() might well have 106 Answer: The caller to set_nmi_callback() might well have
106 initialized some data that is to be used by the 107 initialized some data that is to be used by the new NMI
107 new NMI handler. In this case, the rcu_dereference() 108 handler. In this case, the rcu_dereference_sched() would
108 would be needed, because otherwise a CPU that received 109 be needed, because otherwise a CPU that received an NMI
109 an NMI just after the new handler was set might see 110 just after the new handler was set might see the pointer
110 the pointer to the new NMI handler, but the old 111 to the new NMI handler, but the old pre-initialized
111 pre-initialized version of the handler's data. 112 version of the handler's data.
112 113
113 More important, the rcu_dereference() makes it clear 114 This same sad story can happen on other CPUs when using
114 to someone reading the code that the pointer is being 115 a compiler with aggressive pointer-value speculation
115 protected by RCU. 116 optimizations.
117
118 More important, the rcu_dereference_sched() makes it
119 clear to someone reading the code that the pointer is
120 being protected by RCU-sched.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f90194..790d1a812376 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
260 The reason that it is permissible to use RCU list-traversal 260 The reason that it is permissible to use RCU list-traversal
261 primitives when the update-side lock is held is that doing so 261 primitives when the update-side lock is held is that doing so
262 can be quite helpful in reducing code bloat when common code is 262 can be quite helpful in reducing code bloat when common code is
263 shared between readers and updaters. 263 shared between readers and updaters. Additional primitives
264 are provided for this case, as discussed in lockdep.txt.
264 265
26510. Conversely, if you are in an RCU read-side critical section, 26610. Conversely, if you are in an RCU read-side critical section,
266 and you don't hold the appropriate update-side lock, you -must- 267 and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
344 requiring SRCU's read-side deadlock immunity or low read-side 345 requiring SRCU's read-side deadlock immunity or low read-side
345 realtime latency. 346 realtime latency.
346 347
347 Note that, rcu_assign_pointer() and rcu_dereference() relate to 348 Note that, rcu_assign_pointer() relates to SRCU just as they do
348 SRCU just as they do to other forms of RCU. 349 to other forms of RCU.
349 350
35015. The whole point of call_rcu(), synchronize_rcu(), and friends 35115. The whole point of call_rcu(), synchronize_rcu(), and friends
351 is to wait until all pre-existing readers have finished before 352 is to wait until all pre-existing readers have finished before
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58627bd..d7a49b2f6994 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
32 srcu_dereference(p, sp): 32 srcu_dereference(p, sp):
33 Check for SRCU read-side critical section. 33 Check for SRCU read-side critical section.
34 rcu_dereference_check(p, c): 34 rcu_dereference_check(p, c):
35 Use explicit check expression "c". 35 Use explicit check expression "c". This is useful in
36 code that is invoked by both readers and updaters.
36 rcu_dereference_raw(p) 37 rcu_dereference_raw(p)
37 Don't check. (Use sparingly, if at all.) 38 Don't check. (Use sparingly, if at all.)
39 rcu_dereference_protected(p, c):
40 Use explicit check expression "c", and omit all barriers
41 and compiler constraints. This is useful when the data
42 structure cannot change, for example, in code that is
43 invoked only by updaters.
44 rcu_access_pointer(p):
45 Return the value of the pointer and omit all barriers,
46 but retain the compiler constraints that prevent duplicating
47 or coalescsing. This is useful when when testing the
48 value of the pointer itself, for example, against NULL.
38 49
39The rcu_dereference_check() check expression can be any boolean 50The rcu_dereference_check() check expression can be any boolean
40expression, but would normally include one of the rcu_read_lock_held() 51expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
59RCU read-side critical sections, in case (2) the ->file_lock prevents 70RCU read-side critical sections, in case (2) the ->file_lock prevents
60any change from taking place, and finally, in case (3) the current task 71any change from taking place, and finally, in case (3) the current task
61is the only task accessing the file_struct, again preventing any change 72is the only task accessing the file_struct, again preventing any change
62from taking place. 73from taking place. If the above statement was invoked only from updater
74code, it could instead be written as follows:
75
76 file = rcu_dereference_protected(fdt->fd[fd],
77 lockdep_is_held(&files->file_lock) ||
78 atomic_read(&files->count) == 1);
79
80This would verify cases #2 and #3 above, and furthermore lockdep would
81complain if this was used in an RCU read-side critical section unless one
82of these two cases held. Because rcu_dereference_protected() omits all
83barriers and compiler constraints, it generates better code than do the
84other flavors of rcu_dereference(). On the other hand, it is illegal
85to use rcu_dereference_protected() if either the RCU-protected pointer
86or the RCU-protected data that it points to can change concurrently.
63 87
64There are currently only "universal" versions of the rcu_assign_pointer() 88There are currently only "universal" versions of the rcu_assign_pointer()
65and RCU list-/tree-traversal primitives, which do not (yet) check for 89and RCU list-/tree-traversal primitives, which do not (yet) check for
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee97163..cfaac34c4557 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup
840 init_srcu_struct 840 init_srcu_struct
841 cleanup_srcu_struct 841 cleanup_srcu_struct
842 842
843All: lockdep-checked RCU-protected pointer access
844
845 rcu_dereference_check
846 rcu_dereference_protected
847 rcu_access_pointer
848
843See the comment headers in the source code (or the docbook generated 849See the comment headers in the source code (or the docbook generated
844from them) for more information. 850from them) for more information.
845 851
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480ce432..c0fc1c75fd88 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like:
68 SYN_MT_REPORT 68 SYN_MT_REPORT
69 SYN_REPORT 69 SYN_REPORT
70 70
71Here is the sequence after lifting one of the fingers:
72
73 ABS_MT_POSITION_X
74 ABS_MT_POSITION_Y
75 SYN_MT_REPORT
76 SYN_REPORT
77
78And here is the sequence after lifting the remaining finger:
79
80 SYN_MT_REPORT
81 SYN_REPORT
82
83If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
84ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
85last SYN_REPORT will be dropped by the input core, resulting in no
86zero-finger event reaching userland.
71 87
72Event Semantics 88Event Semantics
73--------------- 89---------------
@@ -217,11 +233,6 @@ where examples can be found.
217difference between the contact position and the approaching tool position 233difference between the contact position and the approaching tool position
218could be used to derive tilt. 234could be used to derive tilt.
219[2] The list can of course be extended. 235[2] The list can of course be extended.
220[3] The multi-touch X driver is currently in the prototyping stage. At the 236[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
221time of writing (April 2009), the MT protocol is not yet merged, and the
222prototype implements finger matching, basic mouse support and two-finger
223scrolling. The project aims at improving the quality of current multi-touch
224functionality available in the Synaptics X driver, and in addition
225implement more advanced gestures.
226[4] See the section on event computation. 237[4] See the section on event computation.
227[5] See the section on finger tracking. 238[5] See the section on finger tracking.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca58536c..e2202e93b148 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
320 amd_iommu= [HW,X86-84] 320 amd_iommu= [HW,X86-84]
321 Pass parameters to the AMD IOMMU driver in the system. 321 Pass parameters to the AMD IOMMU driver in the system.
322 Possible values are: 322 Possible values are:
323 isolate - enable device isolation (each device, as far
324 as possible, will get its own protection
325 domain) [default]
326 share - put every device behind one IOMMU into the
327 same protection domain
328 fullflush - enable flushing of IO/TLB entries when 323 fullflush - enable flushing of IO/TLB entries when
329 they are unmapped. Otherwise they are 324 they are unmapped. Otherwise they are
330 flushed before they will be reused, which 325 flushed before they will be reused, which
diff --git a/Documentation/networking/x25-iface.txt b/Documentation/networking/x25-iface.txt
index 975cc87ebdd1..78f662ee0622 100644
--- a/Documentation/networking/x25-iface.txt
+++ b/Documentation/networking/x25-iface.txt
@@ -20,23 +20,23 @@ the rest of the skbuff, if any more information does exist.
20Packet Layer to Device Driver 20Packet Layer to Device Driver
21----------------------------- 21-----------------------------
22 22
23First Byte = 0x00 23First Byte = 0x00 (X25_IFACE_DATA)
24 24
25This indicates that the rest of the skbuff contains data to be transmitted 25This indicates that the rest of the skbuff contains data to be transmitted
26over the LAPB link. The LAPB link should already exist before any data is 26over the LAPB link. The LAPB link should already exist before any data is
27passed down. 27passed down.
28 28
29First Byte = 0x01 29First Byte = 0x01 (X25_IFACE_CONNECT)
30 30
31Establish the LAPB link. If the link is already established then the connect 31Establish the LAPB link. If the link is already established then the connect
32confirmation message should be returned as soon as possible. 32confirmation message should be returned as soon as possible.
33 33
34First Byte = 0x02 34First Byte = 0x02 (X25_IFACE_DISCONNECT)
35 35
36Terminate the LAPB link. If it is already disconnected then the disconnect 36Terminate the LAPB link. If it is already disconnected then the disconnect
37confirmation message should be returned as soon as possible. 37confirmation message should be returned as soon as possible.
38 38
39First Byte = 0x03 39First Byte = 0x03 (X25_IFACE_PARAMS)
40 40
41LAPB parameters. To be defined. 41LAPB parameters. To be defined.
42 42
@@ -44,22 +44,22 @@ LAPB parameters. To be defined.
44Device Driver to Packet Layer 44Device Driver to Packet Layer
45----------------------------- 45-----------------------------
46 46
47First Byte = 0x00 47First Byte = 0x00 (X25_IFACE_DATA)
48 48
49This indicates that the rest of the skbuff contains data that has been 49This indicates that the rest of the skbuff contains data that has been
50received over the LAPB link. 50received over the LAPB link.
51 51
52First Byte = 0x01 52First Byte = 0x01 (X25_IFACE_CONNECT)
53 53
54LAPB link has been established. The same message is used for both a LAPB 54LAPB link has been established. The same message is used for both a LAPB
55link connect_confirmation and a connect_indication. 55link connect_confirmation and a connect_indication.
56 56
57First Byte = 0x02 57First Byte = 0x02 (X25_IFACE_DISCONNECT)
58 58
59LAPB link has been terminated. This same message is used for both a LAPB 59LAPB link has been terminated. This same message is used for both a LAPB
60link disconnect_confirmation and a disconnect_indication. 60link disconnect_confirmation and a disconnect_indication.
61 61
62First Byte = 0x03 62First Byte = 0x03 (X25_IFACE_PARAMS)
63 63
64LAPB parameters. To be defined. 64LAPB parameters. To be defined.
65 65
diff --git a/MAINTAINERS b/MAINTAINERS
index a7ff13e7ae23..a73b9b3cffad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -485,8 +485,8 @@ S: Maintained
485F: drivers/input/mouse/bcm5974.c 485F: drivers/input/mouse/bcm5974.c
486 486
487APPLE SMC DRIVER 487APPLE SMC DRIVER
488M: Nicolas Boichat <nicolas@boichat.ch> 488M: Henrik Rydberg <rydberg@euromail.se>
489L: mactel-linux-devel@lists.sourceforge.net 489L: lm-sensors@lm-sensors.org
490S: Maintained 490S: Maintained
491F: drivers/hwmon/applesmc.c 491F: drivers/hwmon/applesmc.c
492 492
@@ -971,6 +971,16 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
971W: http://www.mcuos.com 971W: http://www.mcuos.com
972S: Maintained 972S: Maintained
973 973
974ARM/U300 MACHINE SUPPORT
975M: Linus Walleij <linus.walleij@stericsson.com>
976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
977S: Supported
978F: arch/arm/mach-u300/
979F: drivers/i2c/busses/i2c-stu300.c
980F: drivers/rtc/rtc-coh901331.c
981F: drivers/watchdog/coh901327_wdt.c
982F: drivers/dma/coh901318*
983
974ARM/U8500 ARM ARCHITECTURE 984ARM/U8500 ARM ARCHITECTURE
975M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> 985M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 986L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3946,6 +3956,7 @@ F: net/rfkill/
3946F: net/wireless/ 3956F: net/wireless/
3947F: include/net/ieee80211* 3957F: include/net/ieee80211*
3948F: include/linux/wireless.h 3958F: include/linux/wireless.h
3959F: include/linux/iw_handler.h
3949F: drivers/net/wireless/ 3960F: drivers/net/wireless/
3950 3961
3951NETWORKING DRIVERS 3962NETWORKING DRIVERS
diff --git a/Makefile b/Makefile
index 67c1001cfbf5..fa1db9001754 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 34 3SUBLEVEL = 34
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc5
5NAME = Man-Eating Seals of Antiquity 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 0f23009170a1..6ab6b337a913 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -172,7 +172,7 @@ not_angel:
172 adr r0, LC0 172 adr r0, LC0
173 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp}) 173 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
174 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} ) 174 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
175 THUMB( ldr sp, [r0, #28] ) 175 THUMB( ldr sp, [r0, #32] )
176 subs r0, r0, r1 @ calculate the delta offset 176 subs r0, r0, r1 @ calculate the delta offset
177 177
178 @ if delta is zero, we are 178 @ if delta is zero, we are
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
11 11
12#define kmap_prot PAGE_KERNEL 12#define kmap_prot PAGE_KERNEL
13 13
14#define flush_cache_kmaps() flush_cache_all() 14#define flush_cache_kmaps() \
15 do { \
16 if (cache_is_vivt()) \
17 flush_cache_all(); \
18 } while (0)
15 19
16extern pte_t *pkmap_page_table; 20extern pte_t *pkmap_page_table;
17 21
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
21extern void *kmap_high_get(struct page *page); 25extern void *kmap_high_get(struct page *page);
22extern void kunmap_high(struct page *page); 26extern void kunmap_high(struct page *page);
23 27
28extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
29extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
30
31/*
32 * The following functions are already defined by <linux/highmem.h>
33 * when CONFIG_HIGHMEM is not set.
34 */
35#ifdef CONFIG_HIGHMEM
24extern void *kmap(struct page *page); 36extern void *kmap(struct page *page);
25extern void kunmap(struct page *page); 37extern void kunmap(struct page *page);
26extern void *kmap_atomic(struct page *page, enum km_type type); 38extern void *kmap_atomic(struct page *page, enum km_type type);
27extern void kunmap_atomic(void *kvaddr, enum km_type type); 39extern void kunmap_atomic(void *kvaddr, enum km_type type);
28extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); 40extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
29extern struct page *kmap_atomic_to_page(const void *ptr); 41extern struct page *kmap_atomic_to_page(const void *ptr);
42#endif
30 43
31#endif 44#endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
18 KM_IRQ1, 18 KM_IRQ1,
19 KM_SOFTIRQ0, 19 KM_SOFTIRQ0,
20 KM_SOFTIRQ1, 20 KM_SOFTIRQ1,
21 KM_L1_CACHE,
21 KM_L2_CACHE, 22 KM_L2_CACHE,
22 KM_TYPE_NR 23 KM_TYPE_NR
23}; 24};
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
59#endif /* CONFIG_IWMMXT */ 59#endif /* CONFIG_IWMMXT */
60 60
61#ifdef CONFIG_VFP 61#ifdef CONFIG_VFP
62#if __LINUX_ARM_ARCH__ < 6
63/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
64 * word after the registers, and a word of padding at the end for
65 * alignment. */
66#define VFP_MAGIC 0x56465001 62#define VFP_MAGIC 0x56465001
67#define VFP_STORAGE_SIZE 152
68#else
69#define VFP_MAGIC 0x56465002
70#define VFP_STORAGE_SIZE 144
71#endif
72 63
73struct vfp_sigframe 64struct vfp_sigframe
74{ 65{
75 unsigned long magic; 66 unsigned long magic;
76 unsigned long size; 67 unsigned long size;
77 union vfp_state storage; 68 struct user_vfp ufp;
78}; 69 struct user_vfp_exc ufp_exc;
70} __attribute__((__aligned__(8)));
71
72/*
73 * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
74 * 4 bytes padding.
75 */
76#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
77
79#endif /* CONFIG_VFP */ 78#endif /* CONFIG_VFP */
80 79
81/* 80/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
91#ifdef CONFIG_IWMMXT 90#ifdef CONFIG_IWMMXT
92 struct iwmmxt_sigframe iwmmxt; 91 struct iwmmxt_sigframe iwmmxt;
93#endif 92#endif
94#if 0 && defined CONFIG_VFP /* Not yet saved. */ 93#ifdef CONFIG_VFP
95 struct vfp_sigframe vfp; 94 struct vfp_sigframe vfp;
96#endif 95#endif
97 /* Something that isn't a valid magic number for any coprocessor. */ 96 /* Something that isn't a valid magic number for any coprocessor. */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
83 83
84/* 84/*
85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
86 * are ignored by the ptrace system call. 86 * are ignored by the ptrace system call and the signal handler.
87 */ 87 */
88struct user_vfp { 88struct user_vfp {
89 unsigned long long fpregs[32]; 89 unsigned long long fpregs[32];
90 unsigned long fpscr; 90 unsigned long fpscr;
91}; 91};
92 92
93/*
94 * VFP exception registers exposed to user space during signal delivery.
95 * Fields not relavant to the current VFP architecture are ignored.
96 */
97struct user_vfp_exc {
98 unsigned long fpexc;
99 unsigned long fpinst;
100 unsigned long fpinst2;
101};
102
93#endif /* _ARM_USER_H */ 103#endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/ucontext.h> 19#include <asm/ucontext.h>
20#include <asm/unistd.h> 20#include <asm/unistd.h>
21#include <asm/vfp.h>
21 22
22#include "ptrace.h" 23#include "ptrace.h"
23#include "signal.h" 24#include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
175 176
176#endif 177#endif
177 178
179#ifdef CONFIG_VFP
180
181static int preserve_vfp_context(struct vfp_sigframe __user *frame)
182{
183 struct thread_info *thread = current_thread_info();
184 struct vfp_hard_struct *h = &thread->vfpstate.hard;
185 const unsigned long magic = VFP_MAGIC;
186 const unsigned long size = VFP_STORAGE_SIZE;
187 int err = 0;
188
189 vfp_sync_hwstate(thread);
190 __put_user_error(magic, &frame->magic, err);
191 __put_user_error(size, &frame->size, err);
192
193 /*
194 * Copy the floating point registers. There can be unused
195 * registers see asm/hwcap.h for details.
196 */
197 err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
198 sizeof(h->fpregs));
199 /*
200 * Copy the status and control register.
201 */
202 __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
203
204 /*
205 * Copy the exception registers.
206 */
207 __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
208 __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
209 __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
210
211 return err ? -EFAULT : 0;
212}
213
214static int restore_vfp_context(struct vfp_sigframe __user *frame)
215{
216 struct thread_info *thread = current_thread_info();
217 struct vfp_hard_struct *h = &thread->vfpstate.hard;
218 unsigned long magic;
219 unsigned long size;
220 unsigned long fpexc;
221 int err = 0;
222
223 __get_user_error(magic, &frame->magic, err);
224 __get_user_error(size, &frame->size, err);
225
226 if (err)
227 return -EFAULT;
228 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
229 return -EINVAL;
230
231 /*
232 * Copy the floating point registers. There can be unused
233 * registers see asm/hwcap.h for details.
234 */
235 err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
236 sizeof(h->fpregs));
237 /*
238 * Copy the status and control register.
239 */
240 __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
241
242 /*
243 * Sanitise and restore the exception registers.
244 */
245 __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
246 /* Ensure the VFP is enabled. */
247 fpexc |= FPEXC_EN;
248 /* Ensure FPINST2 is invalid and the exception flag is cleared. */
249 fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
250 h->fpexc = fpexc;
251
252 __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
253 __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
254
255 if (!err)
256 vfp_flush_hwstate(thread);
257
258 return err ? -EFAULT : 0;
259}
260
261#endif
262
178/* 263/*
179 * Do a signal return; undo the signal stack. These are aligned to 64-bit. 264 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
180 */ 265 */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
233 err |= restore_iwmmxt_context(&aux->iwmmxt); 318 err |= restore_iwmmxt_context(&aux->iwmmxt);
234#endif 319#endif
235#ifdef CONFIG_VFP 320#ifdef CONFIG_VFP
236// if (err == 0) 321 if (err == 0)
237// err |= vfp_restore_state(&sf->aux.vfp); 322 err |= restore_vfp_context(&aux->vfp);
238#endif 323#endif
239 324
240 return err; 325 return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
348 err |= preserve_iwmmxt_context(&aux->iwmmxt); 433 err |= preserve_iwmmxt_context(&aux->iwmmxt);
349#endif 434#endif
350#ifdef CONFIG_VFP 435#ifdef CONFIG_VFP
351// if (err == 0) 436 if (err == 0)
352// err |= vfp_save_state(&sf->aux.vfp); 437 err |= preserve_vfp_context(&aux->vfp);
353#endif 438#endif
354 __put_user_error(0, &aux->end_magic, err); 439 __put_user_error(0, &aux->end_magic, err);
355 440
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
16obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o 16obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
17obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o 17obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
18obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o 18obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o 19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
20 obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o 20obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o 21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
22obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o 22obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
23obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o 23obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 987fab3d846a..9c5b48e68a71 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
175 orr r3, r3, #(1 << 29) /* bit 29 always set */ 175 orr r3, r3, #(1 << 29) /* bit 29 always set */
176 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] 176 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
177 177
178 wait_pllalock
179
180 /* Save PLLB setting and disable it */ 178 /* Save PLLB setting and disable it */
181 ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 179 ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
182 str r3, .saved_pllbr 180 str r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
184 mov r3, #AT91_PMC_PLLCOUNT 182 mov r3, #AT91_PMC_PLLCOUNT
185 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 183 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
186 184
187 wait_pllblock
188
189 /* Turn off the main oscillator */ 185 /* Turn off the main oscillator */
190 ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] 186 ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
191 bic r3, r3, #AT91_PMC_MOSCEN 187 bic r3, r3, #AT91_PMC_MOSCEN
@@ -205,13 +201,25 @@ ENTRY(at91_slow_clock)
205 ldr r3, .saved_pllbr 201 ldr r3, .saved_pllbr
206 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 202 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
207 203
204 tst r3, #(AT91_PMC_MUL & 0xff0000)
205 bne 1f
206 tst r3, #(AT91_PMC_MUL & ~0xff0000)
207 beq 2f
2081:
208 wait_pllblock 209 wait_pllblock
2102:
209 211
210 /* Restore PLLA setting */ 212 /* Restore PLLA setting */
211 ldr r3, .saved_pllar 213 ldr r3, .saved_pllar
212 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] 214 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
213 215
216 tst r3, #(AT91_PMC_MUL & 0xff0000)
217 bne 3f
218 tst r3, #(AT91_PMC_MUL & ~0xff0000)
219 beq 4f
2203:
214 wait_pllalock 221 wait_pllalock
2224:
215 223
216#ifdef SLOWDOWN_MASTER_CLOCK 224#ifdef SLOWDOWN_MASTER_CLOCK
217 /* 225 /*
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 2ccf670ce1ac..29c0a911df26 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -2221,11 +2221,15 @@ EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2221int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ 2221int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2222 int dirtied /* non-zero if any of the pages were modified */ 2222 int dirtied /* non-zero if any of the pages were modified */
2223 ) { 2223 ) {
2224
2225 int rc = 0;
2224 int regionIdx; 2226 int regionIdx;
2225 int segmentIdx; 2227 int segmentIdx;
2226 DMA_Region_t *region; 2228 DMA_Region_t *region;
2227 DMA_Segment_t *segment; 2229 DMA_Segment_t *segment;
2228 2230
2231 down(&memMap->lock);
2232
2229 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { 2233 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2230 region = &memMap->region[regionIdx]; 2234 region = &memMap->region[regionIdx];
2231 2235
@@ -2239,7 +2243,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2239 printk(KERN_ERR 2243 printk(KERN_ERR
2240 "%s: vmalloc'd pages are not yet supported\n", 2244 "%s: vmalloc'd pages are not yet supported\n",
2241 __func__); 2245 __func__);
2242 return -EINVAL; 2246 rc = -EINVAL;
2247 goto out;
2243 } 2248 }
2244 2249
2245 case DMA_MEM_TYPE_KMALLOC: 2250 case DMA_MEM_TYPE_KMALLOC:
@@ -2276,7 +2281,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2276 printk(KERN_ERR 2281 printk(KERN_ERR
2277 "%s: Unsupported memory type: %d\n", 2282 "%s: Unsupported memory type: %d\n",
2278 __func__, region->memType); 2283 __func__, region->memType);
2279 return -EINVAL; 2284 rc = -EINVAL;
2285 goto out;
2280 } 2286 }
2281 } 2287 }
2282 2288
@@ -2314,9 +2320,10 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2314 memMap->numRegionsUsed = 0; 2320 memMap->numRegionsUsed = 0;
2315 memMap->inUse = 0; 2321 memMap->inUse = 0;
2316 2322
2323out:
2317 up(&memMap->lock); 2324 up(&memMap->lock);
2318 2325
2319 return 0; 2326 return rc;
2320} 2327}
2321 2328
2322EXPORT_SYMBOL(dma_unmap); 2329EXPORT_SYMBOL(dma_unmap);
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index cc377ae8c428..cf547ad7ebd4 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -25,7 +25,7 @@
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26 26
27/************************************************************************* 27/*************************************************************************
28 * GPIO handling for EP93xx 28 * Interrupt handling for EP93xx on-chip GPIOs
29 *************************************************************************/ 29 *************************************************************************/
30static unsigned char gpio_int_unmasked[3]; 30static unsigned char gpio_int_unmasked[3];
31static unsigned char gpio_int_enabled[3]; 31static unsigned char gpio_int_enabled[3];
@@ -40,7 +40,7 @@ static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
40static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; 40static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
41static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; 41static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
42 42
43void ep93xx_gpio_update_int_params(unsigned port) 43static void ep93xx_gpio_update_int_params(unsigned port)
44{ 44{
45 BUG_ON(port > 2); 45 BUG_ON(port > 2);
46 46
@@ -56,7 +56,7 @@ void ep93xx_gpio_update_int_params(unsigned port)
56 EP93XX_GPIO_REG(int_en_register_offset[port])); 56 EP93XX_GPIO_REG(int_en_register_offset[port]));
57} 57}
58 58
59void ep93xx_gpio_int_mask(unsigned line) 59static inline void ep93xx_gpio_int_mask(unsigned line)
60{ 60{
61 gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); 61 gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
62} 62}
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 3872af1cf2c3..170f68e46dd5 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -62,6 +62,15 @@ config MACH_MX31_3DS
62 Include support for MX31PDK (3DS) platform. This includes specific 62 Include support for MX31PDK (3DS) platform. This includes specific
63 configurations for the board and its peripherals. 63 configurations for the board and its peripherals.
64 64
65config MACH_MX31_3DS_MXC_NAND_USE_BBT
66 bool "Make the MXC NAND driver use the in flash Bad Block Table"
67 depends on MACH_MX31_3DS
68 depends on MTD_NAND_MXC
69 help
70 Enable this if you want that the MXC NAND driver uses the in flash
71 Bad Block Table to know what blocks are bad instead of scanning the
72 entire flash looking for bad block markers.
73
65config MACH_MX31MOBOARD 74config MACH_MX31MOBOARD
66 bool "Support mx31moboard platforms (EPFL Mobots group)" 75 bool "Support mx31moboard platforms (EPFL Mobots group)"
67 select ARCH_MX31 76 select ARCH_MX31
@@ -95,6 +104,7 @@ config MACH_PCM043
95config MACH_ARMADILLO5X0 104config MACH_ARMADILLO5X0
96 bool "Support Atmark Armadillo-500 Development Base Board" 105 bool "Support Atmark Armadillo-500 Development Base Board"
97 select ARCH_MX31 106 select ARCH_MX31
107 select MXC_ULPI if USB_ULPI
98 help 108 help
99 Include support for Atmark Armadillo-500 platform. This includes 109 Include support for Atmark Armadillo-500 platform. This includes
100 specific configurations for the board and its peripherals. 110 specific configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/clock-imx31.c b/arch/arm/mach-mx3/clock-imx31.c
index 80dba9966b5e..9a9eb6de6127 100644
--- a/arch/arm/mach-mx3/clock-imx31.c
+++ b/arch/arm/mach-mx3/clock-imx31.c
@@ -468,6 +468,7 @@ static struct clk ahb_clk = {
468 } 468 }
469 469
470DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); 470DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
471DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
471 472
472DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk); 473DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk);
473DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk); 474DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk);
@@ -490,7 +491,7 @@ DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk);
490DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk); 491DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk);
491DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk); 492DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk);
492DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk); 493DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk);
493DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ipg_clk); 494DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk);
494DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk); 495DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
495DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk); 496DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
496DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk); 497DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
@@ -514,7 +515,6 @@ DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk)
514DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk); 515DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk);
515DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); 516DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
516DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); 517DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk);
517DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
518 518
519#define _REGISTER_CLOCK(d, n, c) \ 519#define _REGISTER_CLOCK(d, n, c) \
520 { \ 520 { \
@@ -572,7 +572,6 @@ static struct clk_lookup lookups[] = {
572 _REGISTER_CLOCK(NULL, "iim", iim_clk) 572 _REGISTER_CLOCK(NULL, "iim", iim_clk)
573 _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk) 573 _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
574 _REGISTER_CLOCK(NULL, "mbx", mbx_clk) 574 _REGISTER_CLOCK(NULL, "mbx", mbx_clk)
575 _REGISTER_CLOCK("mxc_rtc", NULL, ckil_clk)
576}; 575};
577 576
578int __init mx31_clocks_init(unsigned long fref) 577int __init mx31_clocks_init(unsigned long fref)
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index 6adb586515ea..f8911154a9fa 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -575,11 +575,26 @@ struct platform_device imx_ssi_device1 = {
575 .resource = imx_ssi_resources1, 575 .resource = imx_ssi_resources1,
576}; 576};
577 577
578static int mx3_devices_init(void) 578static struct resource imx_wdt_resources[] = {
579 {
580 .flags = IORESOURCE_MEM,
581 },
582};
583
584struct platform_device imx_wdt_device0 = {
585 .name = "imx-wdt",
586 .id = 0,
587 .num_resources = ARRAY_SIZE(imx_wdt_resources),
588 .resource = imx_wdt_resources,
589};
590
591static int __init mx3_devices_init(void)
579{ 592{
580 if (cpu_is_mx31()) { 593 if (cpu_is_mx31()) {
581 mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR; 594 mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR;
582 mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff; 595 mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff;
596 imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR;
597 imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff;
583 mxc_register_device(&mxc_rnga_device, NULL); 598 mxc_register_device(&mxc_rnga_device, NULL);
584 } 599 }
585 if (cpu_is_mx35()) { 600 if (cpu_is_mx35()) {
@@ -597,6 +612,8 @@ static int mx3_devices_init(void)
597 imx_ssi_resources0[1].end = MX35_INT_SSI1; 612 imx_ssi_resources0[1].end = MX35_INT_SSI1;
598 imx_ssi_resources1[1].start = MX35_INT_SSI2; 613 imx_ssi_resources1[1].start = MX35_INT_SSI2;
599 imx_ssi_resources1[1].end = MX35_INT_SSI2; 614 imx_ssi_resources1[1].end = MX35_INT_SSI2;
615 imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR;
616 imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff;
600 } 617 }
601 618
602 return 0; 619 return 0;
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h
index 42cf175eac6b..4f77eb501274 100644
--- a/arch/arm/mach-mx3/devices.h
+++ b/arch/arm/mach-mx3/devices.h
@@ -25,4 +25,5 @@ extern struct platform_device mxc_spi_device1;
25extern struct platform_device mxc_spi_device2; 25extern struct platform_device mxc_spi_device2;
26extern struct platform_device imx_ssi_device0; 26extern struct platform_device imx_ssi_device0;
27extern struct platform_device imx_ssi_device1; 27extern struct platform_device imx_ssi_device1;
28 28extern struct platform_device imx_ssi_device1;
29extern struct platform_device imx_wdt_device0;
diff --git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c
index 3d72b0b89705..5f72ec91af2d 100644
--- a/arch/arm/mach-mx3/mach-armadillo5x0.c
+++ b/arch/arm/mach-mx3/mach-armadillo5x0.c
@@ -36,6 +36,9 @@
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/gpio_keys.h> 37#include <linux/gpio_keys.h>
38#include <linux/i2c.h> 38#include <linux/i2c.h>
39#include <linux/usb/otg.h>
40#include <linux/usb/ulpi.h>
41#include <linux/delay.h>
39 42
40#include <mach/hardware.h> 43#include <mach/hardware.h>
41#include <asm/mach-types.h> 44#include <asm/mach-types.h>
@@ -52,6 +55,8 @@
52#include <mach/ipu.h> 55#include <mach/ipu.h>
53#include <mach/mx3fb.h> 56#include <mach/mx3fb.h>
54#include <mach/mxc_nand.h> 57#include <mach/mxc_nand.h>
58#include <mach/mxc_ehci.h>
59#include <mach/ulpi.h>
55 60
56#include "devices.h" 61#include "devices.h"
57#include "crm_regs.h" 62#include "crm_regs.h"
@@ -103,8 +108,158 @@ static int armadillo5x0_pins[] = {
103 /* I2C2 */ 108 /* I2C2 */
104 MX31_PIN_CSPI2_MOSI__SCL, 109 MX31_PIN_CSPI2_MOSI__SCL,
105 MX31_PIN_CSPI2_MISO__SDA, 110 MX31_PIN_CSPI2_MISO__SDA,
111 /* OTG */
112 MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
113 MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
114 MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
115 MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
116 MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
117 MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
118 MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
119 MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
120 MX31_PIN_USBOTG_CLK__USBOTG_CLK,
121 MX31_PIN_USBOTG_DIR__USBOTG_DIR,
122 MX31_PIN_USBOTG_NXT__USBOTG_NXT,
123 MX31_PIN_USBOTG_STP__USBOTG_STP,
124 /* USB host 2 */
125 IOMUX_MODE(MX31_PIN_USBH2_CLK, IOMUX_CONFIG_FUNC),
126 IOMUX_MODE(MX31_PIN_USBH2_DIR, IOMUX_CONFIG_FUNC),
127 IOMUX_MODE(MX31_PIN_USBH2_NXT, IOMUX_CONFIG_FUNC),
128 IOMUX_MODE(MX31_PIN_USBH2_STP, IOMUX_CONFIG_FUNC),
129 IOMUX_MODE(MX31_PIN_USBH2_DATA0, IOMUX_CONFIG_FUNC),
130 IOMUX_MODE(MX31_PIN_USBH2_DATA1, IOMUX_CONFIG_FUNC),
131 IOMUX_MODE(MX31_PIN_STXD3, IOMUX_CONFIG_FUNC),
132 IOMUX_MODE(MX31_PIN_SRXD3, IOMUX_CONFIG_FUNC),
133 IOMUX_MODE(MX31_PIN_SCK3, IOMUX_CONFIG_FUNC),
134 IOMUX_MODE(MX31_PIN_SFS3, IOMUX_CONFIG_FUNC),
135 IOMUX_MODE(MX31_PIN_STXD6, IOMUX_CONFIG_FUNC),
136 IOMUX_MODE(MX31_PIN_SRXD6, IOMUX_CONFIG_FUNC),
106}; 137};
107 138
139/* USB */
140#if defined(CONFIG_USB_ULPI)
141
142#define OTG_RESET IOMUX_TO_GPIO(MX31_PIN_STXD4)
143#define USBH2_RESET IOMUX_TO_GPIO(MX31_PIN_SCK6)
144#define USBH2_CS IOMUX_TO_GPIO(MX31_PIN_GPIO1_3)
145
146#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
147 PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
148
149static int usbotg_init(struct platform_device *pdev)
150{
151 int err;
152
153 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
154 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
155 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
156 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
157 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
158 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
159 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
160 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
161 mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
162 mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
163 mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
164 mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
165
166 /* Chip already enabled by hardware */
167 /* OTG phy reset*/
168 err = gpio_request(OTG_RESET, "USB-OTG-RESET");
169 if (err) {
170 pr_err("Failed to request the usb otg reset gpio\n");
171 return err;
172 }
173
174 err = gpio_direction_output(OTG_RESET, 1/*HIGH*/);
175 if (err) {
176 pr_err("Failed to reset the usb otg phy\n");
177 goto otg_free_reset;
178 }
179
180 gpio_set_value(OTG_RESET, 0/*LOW*/);
181 mdelay(5);
182 gpio_set_value(OTG_RESET, 1/*HIGH*/);
183
184 return 0;
185
186otg_free_reset:
187 gpio_free(OTG_RESET);
188 return err;
189}
190
191static int usbh2_init(struct platform_device *pdev)
192{
193 int err;
194
195 mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
196 mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
197 mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
198 mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
199 mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
200 mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
201 mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
202 mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
203 mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
204 mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
205 mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
206 mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
207
208 mxc_iomux_set_gpr(MUX_PGP_UH2, true);
209
210
211 /* Enable the chip */
212 err = gpio_request(USBH2_CS, "USB-H2-CS");
213 if (err) {
214 pr_err("Failed to request the usb host 2 CS gpio\n");
215 return err;
216 }
217
218 err = gpio_direction_output(USBH2_CS, 0/*Enabled*/);
219 if (err) {
220 pr_err("Failed to drive the usb host 2 CS gpio\n");
221 goto h2_free_cs;
222 }
223
224 /* H2 phy reset*/
225 err = gpio_request(USBH2_RESET, "USB-H2-RESET");
226 if (err) {
227 pr_err("Failed to request the usb host 2 reset gpio\n");
228 goto h2_free_cs;
229 }
230
231 err = gpio_direction_output(USBH2_RESET, 1/*HIGH*/);
232 if (err) {
233 pr_err("Failed to reset the usb host 2 phy\n");
234 goto h2_free_reset;
235 }
236
237 gpio_set_value(USBH2_RESET, 0/*LOW*/);
238 mdelay(5);
239 gpio_set_value(USBH2_RESET, 1/*HIGH*/);
240
241 return 0;
242
243h2_free_reset:
244 gpio_free(USBH2_RESET);
245h2_free_cs:
246 gpio_free(USBH2_CS);
247 return err;
248}
249
250static struct mxc_usbh_platform_data usbotg_pdata = {
251 .init = usbotg_init,
252 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
253 .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
254};
255
256static struct mxc_usbh_platform_data usbh2_pdata = {
257 .init = usbh2_init,
258 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
259 .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
260};
261#endif /* CONFIG_USB_ULPI */
262
108/* RTC over I2C*/ 263/* RTC over I2C*/
109#define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4) 264#define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4)
110 265
@@ -393,6 +548,17 @@ static void __init armadillo5x0_init(void)
393 if (armadillo5x0_i2c_rtc.irq == 0) 548 if (armadillo5x0_i2c_rtc.irq == 0)
394 pr_warning("armadillo5x0_init: failed to get RTC IRQ\n"); 549 pr_warning("armadillo5x0_init: failed to get RTC IRQ\n");
395 i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1); 550 i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1);
551
552 /* USB */
553#if defined(CONFIG_USB_ULPI)
554 usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
555 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
556 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
557 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
558
559 mxc_register_device(&mxc_otg_host, &usbotg_pdata);
560 mxc_register_device(&mxc_usbh2, &usbh2_pdata);
561#endif
396} 562}
397 563
398static void __init armadillo5x0_timer_init(void) 564static void __init armadillo5x0_timer_init(void)
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index b88c18ad7698..f54af1e29ca4 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -23,6 +23,9 @@
23#include <linux/gpio.h> 23#include <linux/gpio.h>
24#include <linux/smsc911x.h> 24#include <linux/smsc911x.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/mfd/mc13783.h>
27#include <linux/spi/spi.h>
28#include <linux/regulator/machine.h>
26 29
27#include <mach/hardware.h> 30#include <mach/hardware.h>
28#include <asm/mach-types.h> 31#include <asm/mach-types.h>
@@ -31,26 +34,96 @@
31#include <asm/memory.h> 34#include <asm/memory.h>
32#include <asm/mach/map.h> 35#include <asm/mach/map.h>
33#include <mach/common.h> 36#include <mach/common.h>
34#include <mach/board-mx31pdk.h> 37#include <mach/board-mx31_3ds.h>
35#include <mach/imx-uart.h> 38#include <mach/imx-uart.h>
36#include <mach/iomux-mx3.h> 39#include <mach/iomux-mx3.h>
40#include <mach/mxc_nand.h>
41#include <mach/spi.h>
37#include "devices.h" 42#include "devices.h"
38 43
39/*! 44/*!
40 * @file mx31pdk.c 45 * @file mx31_3ds.c
41 * 46 *
42 * @brief This file contains the board-specific initialization routines. 47 * @brief This file contains the board-specific initialization routines.
43 * 48 *
44 * @ingroup System 49 * @ingroup System
45 */ 50 */
46 51
47static int mx31pdk_pins[] = { 52static int mx31_3ds_pins[] = {
48 /* UART1 */ 53 /* UART1 */
49 MX31_PIN_CTS1__CTS1, 54 MX31_PIN_CTS1__CTS1,
50 MX31_PIN_RTS1__RTS1, 55 MX31_PIN_RTS1__RTS1,
51 MX31_PIN_TXD1__TXD1, 56 MX31_PIN_TXD1__TXD1,
52 MX31_PIN_RXD1__RXD1, 57 MX31_PIN_RXD1__RXD1,
53 IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), 58 IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO),
59 /* SPI 1 */
60 MX31_PIN_CSPI2_SCLK__SCLK,
61 MX31_PIN_CSPI2_MOSI__MOSI,
62 MX31_PIN_CSPI2_MISO__MISO,
63 MX31_PIN_CSPI2_SPI_RDY__SPI_RDY,
64 MX31_PIN_CSPI2_SS0__SS0,
65 MX31_PIN_CSPI2_SS2__SS2, /*CS for MC13783 */
66 /* MC13783 IRQ */
67 IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO),
68};
69
70/* Regulators */
71static struct regulator_init_data pwgtx_init = {
72 .constraints = {
73 .boot_on = 1,
74 .always_on = 1,
75 },
76};
77
78static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
79 {
80 .id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */
81 .init_data = &pwgtx_init,
82 }, {
83 .id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */
84 .init_data = &pwgtx_init,
85 },
86};
87
88/* MC13783 */
89static struct mc13783_platform_data mc13783_pdata __initdata = {
90 .regulators = mx31_3ds_regulators,
91 .num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
92 .flags = MC13783_USE_REGULATOR,
93};
94
95/* SPI */
96static int spi1_internal_chipselect[] = {
97 MXC_SPI_CS(0),
98 MXC_SPI_CS(2),
99};
100
101static struct spi_imx_master spi1_pdata = {
102 .chipselect = spi1_internal_chipselect,
103 .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect),
104};
105
106static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
107 {
108 .modalias = "mc13783",
109 .max_speed_hz = 1000000,
110 .bus_num = 1,
111 .chip_select = 1, /* SS2 */
112 .platform_data = &mc13783_pdata,
113 .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
114 .mode = SPI_CS_HIGH,
115 },
116};
117
118/*
119 * NAND Flash
120 */
121static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = {
122 .width = 1,
123 .hw_ecc = 1,
124#ifdef MACH_MX31_3DS_MXC_NAND_USE_BBT
125 .flash_bbt = 1,
126#endif
54}; 127};
55 128
56static struct imxuart_platform_data uart_pdata = { 129static struct imxuart_platform_data uart_pdata = {
@@ -95,7 +168,7 @@ static struct platform_device smsc911x_device = {
95 * LEDs, switches, interrupts for Ethernet. 168 * LEDs, switches, interrupts for Ethernet.
96 */ 169 */
97 170
98static void mx31pdk_expio_irq_handler(uint32_t irq, struct irq_desc *desc) 171static void mx31_3ds_expio_irq_handler(uint32_t irq, struct irq_desc *desc)
99{ 172{
100 uint32_t imr_val; 173 uint32_t imr_val;
101 uint32_t int_valid; 174 uint32_t int_valid;
@@ -163,7 +236,7 @@ static struct irq_chip expio_irq_chip = {
163 .unmask = expio_unmask_irq, 236 .unmask = expio_unmask_irq,
164}; 237};
165 238
166static int __init mx31pdk_init_expio(void) 239static int __init mx31_3ds_init_expio(void)
167{ 240{
168 int i; 241 int i;
169 int ret; 242 int ret;
@@ -176,7 +249,7 @@ static int __init mx31pdk_init_expio(void)
176 return -ENODEV; 249 return -ENODEV;
177 } 250 }
178 251
179 pr_info("i.MX31PDK Debug board detected, rev = 0x%04X\n", 252 pr_info("i.MX31 3DS Debug board detected, rev = 0x%04X\n",
180 __raw_readw(CPLD_CODE_VER_REG)); 253 __raw_readw(CPLD_CODE_VER_REG));
181 254
182 /* 255 /*
@@ -201,7 +274,7 @@ static int __init mx31pdk_init_expio(void)
201 set_irq_flags(i, IRQF_VALID); 274 set_irq_flags(i, IRQF_VALID);
202 } 275 }
203 set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW); 276 set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW);
204 set_irq_chained_handler(EXPIO_PARENT_INT, mx31pdk_expio_irq_handler); 277 set_irq_chained_handler(EXPIO_PARENT_INT, mx31_3ds_expio_irq_handler);
205 278
206 return 0; 279 return 0;
207} 280}
@@ -209,7 +282,7 @@ static int __init mx31pdk_init_expio(void)
209/* 282/*
210 * This structure defines the MX31 memory map. 283 * This structure defines the MX31 memory map.
211 */ 284 */
212static struct map_desc mx31pdk_io_desc[] __initdata = { 285static struct map_desc mx31_3ds_io_desc[] __initdata = {
213 { 286 {
214 .virtual = MX31_CS5_BASE_ADDR_VIRT, 287 .virtual = MX31_CS5_BASE_ADDR_VIRT,
215 .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR), 288 .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR),
@@ -221,10 +294,10 @@ static struct map_desc mx31pdk_io_desc[] __initdata = {
221/* 294/*
222 * Set up static virtual mappings. 295 * Set up static virtual mappings.
223 */ 296 */
224static void __init mx31pdk_map_io(void) 297static void __init mx31_3ds_map_io(void)
225{ 298{
226 mx31_map_io(); 299 mx31_map_io();
227 iotable_init(mx31pdk_io_desc, ARRAY_SIZE(mx31pdk_io_desc)); 300 iotable_init(mx31_3ds_io_desc, ARRAY_SIZE(mx31_3ds_io_desc));
228} 301}
229 302
230/*! 303/*!
@@ -232,35 +305,40 @@ static void __init mx31pdk_map_io(void)
232 */ 305 */
233static void __init mxc_board_init(void) 306static void __init mxc_board_init(void)
234{ 307{
235 mxc_iomux_setup_multiple_pins(mx31pdk_pins, ARRAY_SIZE(mx31pdk_pins), 308 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
236 "mx31pdk"); 309 "mx31_3ds");
237 310
238 mxc_register_device(&mxc_uart_device0, &uart_pdata); 311 mxc_register_device(&mxc_uart_device0, &uart_pdata);
312 mxc_register_device(&mxc_nand_device, &imx31_3ds_nand_flash_pdata);
313
314 mxc_register_device(&mxc_spi_device1, &spi1_pdata);
315 spi_register_board_info(mx31_3ds_spi_devs,
316 ARRAY_SIZE(mx31_3ds_spi_devs));
239 317
240 if (!mx31pdk_init_expio()) 318 if (!mx31_3ds_init_expio())
241 platform_device_register(&smsc911x_device); 319 platform_device_register(&smsc911x_device);
242} 320}
243 321
244static void __init mx31pdk_timer_init(void) 322static void __init mx31_3ds_timer_init(void)
245{ 323{
246 mx31_clocks_init(26000000); 324 mx31_clocks_init(26000000);
247} 325}
248 326
249static struct sys_timer mx31pdk_timer = { 327static struct sys_timer mx31_3ds_timer = {
250 .init = mx31pdk_timer_init, 328 .init = mx31_3ds_timer_init,
251}; 329};
252 330
253/* 331/*
254 * The following uses standard kernel macros defined in arch.h in order to 332 * The following uses standard kernel macros defined in arch.h in order to
255 * initialize __mach_desc_MX31PDK data structure. 333 * initialize __mach_desc_MX31_3DS data structure.
256 */ 334 */
257MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)") 335MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
258 /* Maintainer: Freescale Semiconductor, Inc. */ 336 /* Maintainer: Freescale Semiconductor, Inc. */
259 .phys_io = MX31_AIPS1_BASE_ADDR, 337 .phys_io = MX31_AIPS1_BASE_ADDR,
260 .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc, 338 .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc,
261 .boot_params = MX3x_PHYS_OFFSET + 0x100, 339 .boot_params = MX3x_PHYS_OFFSET + 0x100,
262 .map_io = mx31pdk_map_io, 340 .map_io = mx31_3ds_map_io,
263 .init_irq = mx31_init_irq, 341 .init_irq = mx31_init_irq,
264 .init_machine = mxc_board_init, 342 .init_machine = mxc_board_init,
265 .timer = &mx31pdk_timer, 343 .timer = &mx31_3ds_timer,
266MACHINE_END 344MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index 034ec8190065..2df1ec55a97e 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -35,7 +35,6 @@
35#include <linux/can/platform/sja1000.h> 35#include <linux/can/platform/sja1000.h>
36#include <linux/usb/otg.h> 36#include <linux/usb/otg.h>
37#include <linux/usb/ulpi.h> 37#include <linux/usb/ulpi.h>
38#include <linux/fsl_devices.h>
39#include <linux/gfp.h> 38#include <linux/gfp.h>
40 39
41#include <media/soc_camera.h> 40#include <media/soc_camera.h>
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c
index ccd874225c3b..093c595ca581 100644
--- a/arch/arm/mach-mx3/mx31lite-db.c
+++ b/arch/arm/mach-mx3/mx31lite-db.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <linux/platform_device.h>
32#include <linux/leds.h> 31#include <linux/leds.h>
33#include <linux/platform_device.h> 32#include <linux/platform_device.h>
34 33
@@ -206,5 +205,6 @@ void __init mx31lite_db_init(void)
206 mxc_register_device(&mxcsdhc_device0, &mmc_pdata); 205 mxc_register_device(&mxcsdhc_device0, &mmc_pdata);
207 mxc_register_device(&mxc_spi_device0, &spi0_pdata); 206 mxc_register_device(&mxc_spi_device0, &spi0_pdata);
208 platform_device_register(&litekit_led_device); 207 platform_device_register(&litekit_led_device);
208 mxc_register_device(&imx_wdt_device0, NULL);
209} 209}
210 210
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index be90c03101cd..8f85f73b83a8 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -757,7 +757,7 @@ DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
757 757
758/* GPT */ 758/* GPT */
759DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET, 759DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
760 NULL, NULL, &ipg_perclk, NULL); 760 NULL, NULL, &ipg_clk, NULL);
761DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET, 761DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
762 NULL, NULL, &ipg_clk, NULL); 762 NULL, NULL, &ipg_clk, NULL);
763 763
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 41c769f08c4d..2d37785e3857 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -14,9 +14,62 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h>
17#include <mach/hardware.h> 18#include <mach/hardware.h>
18#include <asm/io.h> 19#include <asm/io.h>
19 20
21static int cpu_silicon_rev = -1;
22
23#define SI_REV 0x48
24
25static void query_silicon_parameter(void)
26{
27 void __iomem *rom = ioremap(MX51_IROM_BASE_ADDR, MX51_IROM_SIZE);
28 u32 rev;
29
30 if (!rom) {
31 cpu_silicon_rev = -EINVAL;
32 return;
33 }
34
35 rev = readl(rom + SI_REV);
36 switch (rev) {
37 case 0x1:
38 cpu_silicon_rev = MX51_CHIP_REV_1_0;
39 break;
40 case 0x2:
41 cpu_silicon_rev = MX51_CHIP_REV_1_1;
42 break;
43 case 0x10:
44 cpu_silicon_rev = MX51_CHIP_REV_2_0;
45 break;
46 case 0x20:
47 cpu_silicon_rev = MX51_CHIP_REV_3_0;
48 break;
49 default:
50 cpu_silicon_rev = 0;
51 }
52
53 iounmap(rom);
54}
55
56/*
57 * Returns:
58 * the silicon revision of the cpu
59 * -EINVAL - not a mx51
60 */
61int mx51_revision(void)
62{
63 if (!cpu_is_mx51())
64 return -EINVAL;
65
66 if (cpu_silicon_rev == -1)
67 query_silicon_parameter();
68
69 return cpu_silicon_rev;
70}
71EXPORT_SYMBOL(mx51_revision);
72
20static int __init post_cpu_init(void) 73static int __init post_cpu_init(void)
21{ 74{
22 unsigned int reg; 75 unsigned int reg;
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index c21e18be7af8..b7677ef80cc4 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -35,11 +35,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
35 .length = MX51_DEBUG_SIZE, 35 .length = MX51_DEBUG_SIZE,
36 .type = MT_DEVICE 36 .type = MT_DEVICE
37 }, { 37 }, {
38 .virtual = MX51_TZIC_BASE_ADDR_VIRT,
39 .pfn = __phys_to_pfn(MX51_TZIC_BASE_ADDR),
40 .length = MX51_TZIC_SIZE,
41 .type = MT_DEVICE
42 }, {
43 .virtual = MX51_AIPS1_BASE_ADDR_VIRT, 38 .virtual = MX51_AIPS1_BASE_ADDR_VIRT,
44 .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR), 39 .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR),
45 .length = MX51_AIPS1_SIZE, 40 .length = MX51_AIPS1_SIZE,
@@ -54,11 +49,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
54 .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR), 49 .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR),
55 .length = MX51_AIPS2_SIZE, 50 .length = MX51_AIPS2_SIZE,
56 .type = MT_DEVICE 51 .type = MT_DEVICE
57 }, {
58 .virtual = MX51_NFC_AXI_BASE_ADDR_VIRT,
59 .pfn = __phys_to_pfn(MX51_NFC_AXI_BASE_ADDR),
60 .length = MX51_NFC_AXI_SIZE,
61 .type = MT_DEVICE
62 }, 52 },
63}; 53};
64 54
@@ -69,14 +59,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
69 */ 59 */
70void __init mx51_map_io(void) 60void __init mx51_map_io(void)
71{ 61{
72 u32 tzic_addr;
73
74 if (mx51_revision() < MX51_CHIP_REV_2_0)
75 tzic_addr = 0x8FFFC000;
76 else
77 tzic_addr = 0xE0003000;
78 mxc_io_desc[2].pfn = __phys_to_pfn(tzic_addr);
79
80 mxc_set_cpu_type(MXC_CPU_MX51); 62 mxc_set_cpu_type(MXC_CPU_MX51);
81 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); 63 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
82 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR)); 64 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR));
@@ -85,5 +67,17 @@ void __init mx51_map_io(void)
85 67
86void __init mx51_init_irq(void) 68void __init mx51_init_irq(void)
87{ 69{
88 tzic_init_irq(MX51_IO_ADDRESS(MX51_TZIC_BASE_ADDR)); 70 unsigned long tzic_addr;
71 void __iomem *tzic_virt;
72
73 if (mx51_revision() < MX51_CHIP_REV_2_0)
74 tzic_addr = MX51_TZIC_BASE_ADDR_TO1;
75 else
76 tzic_addr = MX51_TZIC_BASE_ADDR;
77
78 tzic_virt = ioremap(tzic_addr, SZ_16K);
79 if (!tzic_virt)
80 panic("unable to map TZIC interrupt controller\n");
81
82 tzic_init_irq(tzic_virt);
89} 83}
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
41 kfrom = kmap_atomic(from, KM_USER0); 41 kfrom = kmap_atomic(from, KM_USER0);
42 kto = kmap_atomic(to, KM_USER1); 42 kto = kmap_atomic(to, KM_USER1);
43 copy_page(kto, kfrom); 43 copy_page(kto, kfrom);
44#ifdef CONFIG_HIGHMEM 44 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
45 /*
46 * kmap_atomic() doesn't set the page virtual address, and
47 * kunmap_atomic() takes care of cache flushing already.
48 */
49 if (page_address(to) != NULL)
50#endif
51 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
52 kunmap_atomic(kto, KM_USER1); 45 kunmap_atomic(kto, KM_USER1);
53 kunmap_atomic(kfrom, KM_USER0); 46 kunmap_atomic(kfrom, KM_USER0);
54} 47}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
464 vaddr += offset; 464 vaddr += offset;
465 op(vaddr, len, dir); 465 op(vaddr, len, dir);
466 kunmap_high(page); 466 kunmap_high(page);
467 } else if (cache_is_vipt()) {
468 pte_t saved_pte;
469 vaddr = kmap_high_l1_vipt(page, &saved_pte);
470 op(vaddr + offset, len, dir);
471 kunmap_high_l1_vipt(page, saved_pte);
467 } 472 }
468 } else { 473 } else {
469 vaddr = page_address(page) + offset; 474 vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/cachetype.h> 15#include <asm/cachetype.h>
16#include <asm/highmem.h>
16#include <asm/smp_plat.h> 17#include <asm/smp_plat.h>
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
152 153
153void __flush_dcache_page(struct address_space *mapping, struct page *page) 154void __flush_dcache_page(struct address_space *mapping, struct page *page)
154{ 155{
155 void *addr = page_address(page);
156
157 /* 156 /*
158 * Writeback any data associated with the kernel mapping of this 157 * Writeback any data associated with the kernel mapping of this
159 * page. This ensures that data in the physical page is mutually 158 * page. This ensures that data in the physical page is mutually
160 * coherent with the kernels mapping. 159 * coherent with the kernels mapping.
161 */ 160 */
162#ifdef CONFIG_HIGHMEM 161 if (!PageHighMem(page)) {
163 /* 162 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
164 * kmap_atomic() doesn't set the page virtual address, and 163 } else {
165 * kunmap_atomic() takes care of cache flushing already. 164 void *addr = kmap_high_get(page);
166 */ 165 if (addr) {
167 if (addr) 166 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
168#endif 167 kunmap_high(page);
169 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 168 } else if (cache_is_vipt()) {
169 pte_t saved_pte;
170 addr = kmap_high_l1_vipt(page, &saved_pte);
171 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
172 kunmap_high_l1_vipt(page, saved_pte);
173 }
174 }
170 175
171 /* 176 /*
172 * If this is a page cache page, and we have an aliasing VIPT cache, 177 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
79 unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); 79 unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
80 80
81 if (kvaddr >= (void *)FIXADDR_START) { 81 if (kvaddr >= (void *)FIXADDR_START) {
82 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 82 if (cache_is_vivt())
83 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
83#ifdef CONFIG_DEBUG_HIGHMEM 84#ifdef CONFIG_DEBUG_HIGHMEM
84 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 85 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
85 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 86 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
124 pte = TOP_PTE(vaddr); 125 pte = TOP_PTE(vaddr);
125 return pte_page(*pte); 126 return pte_page(*pte);
126} 127}
128
129#ifdef CONFIG_CPU_CACHE_VIPT
130
131#include <linux/percpu.h>
132
133/*
134 * The VIVT cache of a highmem page is always flushed before the page
135 * is unmapped. Hence unmapped highmem pages need no cache maintenance
136 * in that case.
137 *
138 * However unmapped pages may still be cached with a VIPT cache, and
139 * it is not possible to perform cache maintenance on them using physical
140 * addresses unfortunately. So we have no choice but to set up a temporary
141 * virtual mapping for that purpose.
142 *
143 * Yet this VIPT cache maintenance may be triggered from DMA support
144 * functions which are possibly called from interrupt context. As we don't
145 * want to keep interrupt disabled all the time when such maintenance is
146 * taking place, we therefore allow for some reentrancy by preserving and
147 * restoring the previous fixmap entry before the interrupted context is
148 * resumed. If the reentrancy depth is 0 then there is no need to restore
149 * the previous fixmap, and leaving the current one in place allow it to
150 * be reused the next time without a TLB flush (common with DMA).
151 */
152
153static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
154
155void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
156{
157 unsigned int idx, cpu = smp_processor_id();
158 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
159 unsigned long vaddr, flags;
160 pte_t pte, *ptep;
161
162 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
163 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
164 ptep = TOP_PTE(vaddr);
165 pte = mk_pte(page, kmap_prot);
166
167 if (!in_interrupt())
168 preempt_disable();
169
170 raw_local_irq_save(flags);
171 (*depth)++;
172 if (pte_val(*ptep) == pte_val(pte)) {
173 *saved_pte = pte;
174 } else {
175 *saved_pte = *ptep;
176 set_pte_ext(ptep, pte, 0);
177 local_flush_tlb_kernel_page(vaddr);
178 }
179 raw_local_irq_restore(flags);
180
181 return (void *)vaddr;
182}
183
184void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
185{
186 unsigned int idx, cpu = smp_processor_id();
187 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
188 unsigned long vaddr, flags;
189 pte_t pte, *ptep;
190
191 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
192 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
193 ptep = TOP_PTE(vaddr);
194 pte = mk_pte(page, kmap_prot);
195
196 BUG_ON(pte_val(*ptep) != pte_val(pte));
197 BUG_ON(*depth <= 0);
198
199 raw_local_irq_save(flags);
200 (*depth)--;
201 if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
202 set_pte_ext(ptep, saved_pte, 0);
203 local_flush_tlb_kernel_page(vaddr);
204 }
205 raw_local_irq_restore(flags);
206
207 if (!in_interrupt())
208 preempt_enable();
209}
210
211#endif /* CONFIG_CPU_CACHE_VIPT */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
420 user_pgprot |= L_PTE_SHARED; 420 user_pgprot |= L_PTE_SHARED;
421 kern_pgprot |= L_PTE_SHARED; 421 kern_pgprot |= L_PTE_SHARED;
422 vecs_pgprot |= L_PTE_SHARED; 422 vecs_pgprot |= L_PTE_SHARED;
423 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
424 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
425 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
426 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
423 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 427 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
424 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 428 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
425#endif 429#endif
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
1050 pgd_t *pgd; 1054 pgd_t *pgd;
1051 int i; 1055 int i;
1052 1056
1053 if (current->mm && current->mm->pgd) 1057 /*
1054 pgd = current->mm->pgd; 1058 * We need to access to user-mode page tables here. For kernel threads
1055 else 1059 * we don't have any user-mode mappings so we use the context that we
1056 pgd = init_mm.pgd; 1060 * "borrowed".
1061 */
1062 pgd = current->active_mm->pgd;
1057 1063
1058 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 1064 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
1059 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 1065 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
index 2bbd6ed17f50..da92933a233b 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
@@ -8,8 +8,8 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#ifndef __ASM_ARCH_MXC_BOARD_MX31PDK_H__ 11#ifndef __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
12#define __ASM_ARCH_MXC_BOARD_MX31PDK_H__ 12#define __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
13 13
14/* Definitions for components on the Debug board */ 14/* Definitions for components on the Debug board */
15 15
@@ -56,4 +56,4 @@
56 56
57#define MXC_MAX_EXP_IO_LINES 16 57#define MXC_MAX_EXP_IO_LINES 16
58 58
59#endif /* __ASM_ARCH_MXC_BOARD_MX31PDK_H__ */ 59#endif /* __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h
index 771532b6b4a6..5aad344d5651 100644
--- a/arch/arm/plat-mxc/include/mach/mx51.h
+++ b/arch/arm/plat-mxc/include/mach/mx51.h
@@ -14,7 +14,7 @@
14 * FB100000 70000000 1M SPBA 0 14 * FB100000 70000000 1M SPBA 0
15 * FB000000 73F00000 1M AIPS 1 15 * FB000000 73F00000 1M AIPS 1
16 * FB200000 83F00000 1M AIPS 2 16 * FB200000 83F00000 1M AIPS 2
17 * FA100000 8FFFC000 16K TZIC (interrupt controller) 17 * 8FFFC000 16K TZIC (interrupt controller)
18 * 90000000 256M CSD0 SDRAM/DDR 18 * 90000000 256M CSD0 SDRAM/DDR
19 * A0000000 256M CSD1 SDRAM/DDR 19 * A0000000 256M CSD1 SDRAM/DDR
20 * B0000000 128M CS0 Flash 20 * B0000000 128M CS0 Flash
@@ -23,11 +23,17 @@
23 * C8000000 64M CS3 Flash 23 * C8000000 64M CS3 Flash
24 * CC000000 32M CS4 SRAM 24 * CC000000 32M CS4 SRAM
25 * CE000000 32M CS5 SRAM 25 * CE000000 32M CS5 SRAM
26 * F9000000 CFFF0000 64K NFC (NAND Flash AXI) 26 * CFFF0000 64K NFC (NAND Flash AXI)
27 * 27 *
28 */ 28 */
29 29
30/* 30/*
31 * IROM
32 */
33#define MX51_IROM_BASE_ADDR 0x0
34#define MX51_IROM_SIZE SZ_64K
35
36/*
31 * IRAM 37 * IRAM
32 */ 38 */
33#define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */ 39#define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */
@@ -40,7 +46,6 @@
40 * NFC 46 * NFC
41 */ 47 */
42#define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */ 48#define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */
43#define MX51_NFC_AXI_BASE_ADDR_VIRT 0xF9000000
44#define MX51_NFC_AXI_SIZE SZ_64K 49#define MX51_NFC_AXI_SIZE SZ_64K
45 50
46/* 51/*
@@ -49,9 +54,8 @@
49#define MX51_GPU_BASE_ADDR 0x20000000 54#define MX51_GPU_BASE_ADDR 0x20000000
50#define MX51_GPU2D_BASE_ADDR 0xD0000000 55#define MX51_GPU2D_BASE_ADDR 0xD0000000
51 56
52#define MX51_TZIC_BASE_ADDR 0x8FFFC000 57#define MX51_TZIC_BASE_ADDR_TO1 0x8FFFC000
53#define MX51_TZIC_BASE_ADDR_VIRT 0xFA100000 58#define MX51_TZIC_BASE_ADDR 0xE0000000
54#define MX51_TZIC_SIZE SZ_16K
55 59
56#define MX51_DEBUG_BASE_ADDR 0x60000000 60#define MX51_DEBUG_BASE_ADDR 0x60000000
57#define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000 61#define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000
@@ -232,12 +236,10 @@
232#define MX51_IO_ADDRESS(x) \ 236#define MX51_IO_ADDRESS(x) \
233 (void __iomem *) \ 237 (void __iomem *) \
234 (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \ 238 (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \
235 MX51_IS_MODULE(x, TZIC) ? MX51_TZIC_IO_ADDRESS(x) : \
236 MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \ 239 MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \
237 MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \ 240 MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \
238 MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \ 241 MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \
239 MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \ 242 MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \
240 MX51_IS_MODULE(x, NFC_AXI) ? MX51_NFC_AXI_IO_ADDRESS(x) : \
241 0xDEADBEEF) 243 0xDEADBEEF)
242 244
243/* 245/*
@@ -246,9 +248,6 @@
246#define MX51_IRAM_IO_ADDRESS(x) \ 248#define MX51_IRAM_IO_ADDRESS(x) \
247 (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT) 249 (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT)
248 250
249#define MX51_TZIC_IO_ADDRESS(x) \
250 (((x) - MX51_TZIC_BASE_ADDR) + MX51_TZIC_BASE_ADDR_VIRT)
251
252#define MX51_DEBUG_IO_ADDRESS(x) \ 251#define MX51_DEBUG_IO_ADDRESS(x) \
253 (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT) 252 (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT)
254 253
@@ -261,9 +260,6 @@
261#define MX51_AIPS2_IO_ADDRESS(x) \ 260#define MX51_AIPS2_IO_ADDRESS(x) \
262 (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT) 261 (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT)
263 262
264#define MX51_NFC_AXI_IO_ADDRESS(x) \
265 (((x) - MX51_NFC_AXI_BASE_ADDR) + MX51_NFC_AXI_BASE_ADDR_VIRT)
266
267#define MX51_IS_MEM_DEVICE_NONSHARED(x) 0 263#define MX51_IS_MEM_DEVICE_NONSHARED(x) 0
268 264
269/* 265/*
@@ -443,12 +439,7 @@
443 439
444#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) 440#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
445 441
446extern unsigned int system_rev; 442extern int mx51_revision(void);
447
448static inline unsigned int mx51_revision(void)
449{
450 return system_rev;
451}
452#endif 443#endif
453 444
454#endif /* __ASM_ARCH_MXC_MX51_H__ */ 445#endif /* __ASM_ARCH_MXC_MX51_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 52e476a150ca..b6d3d0fddc48 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -66,6 +66,7 @@ static inline void flush(void)
66#define MX2X_UART1_BASE_ADDR 0x1000a000 66#define MX2X_UART1_BASE_ADDR 0x1000a000
67#define MX3X_UART1_BASE_ADDR 0x43F90000 67#define MX3X_UART1_BASE_ADDR 0x43F90000
68#define MX3X_UART2_BASE_ADDR 0x43F94000 68#define MX3X_UART2_BASE_ADDR 0x43F94000
69#define MX51_UART1_BASE_ADDR 0x73fbc000
69 70
70static __inline__ void __arch_decomp_setup(unsigned long arch_id) 71static __inline__ void __arch_decomp_setup(unsigned long arch_id)
71{ 72{
@@ -101,6 +102,9 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
101 case MACH_TYPE_MAGX_ZN5: 102 case MACH_TYPE_MAGX_ZN5:
102 uart_base = MX3X_UART2_BASE_ADDR; 103 uart_base = MX3X_UART2_BASE_ADDR;
103 break; 104 break;
105 case MACH_TYPE_MX51_BABBAGE:
106 uart_base = MX51_UART1_BASE_ADDR;
107 break;
104 default: 108 default:
105 break; 109 break;
106 } 110 }
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb949328..315a540c7ce5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
428static inline void vfp_pm_init(void) { } 428static inline void vfp_pm_init(void) { }
429#endif /* CONFIG_PM */ 429#endif /* CONFIG_PM */
430 430
431/*
432 * Synchronise the hardware VFP state of a thread other than current with the
433 * saved one. This function is used by the ptrace mechanism.
434 */
435#ifdef CONFIG_SMP
436void vfp_sync_hwstate(struct thread_info *thread)
437{
438}
439
440void vfp_flush_hwstate(struct thread_info *thread)
441{
442 /*
443 * On SMP systems, the VFP state is automatically saved at every
444 * context switch. We mark the thread VFP state as belonging to a
445 * non-existent CPU so that the saved one will be reloaded when
446 * needed.
447 */
448 thread->vfpstate.hard.cpu = NR_CPUS;
449}
450#else
451void vfp_sync_hwstate(struct thread_info *thread) 431void vfp_sync_hwstate(struct thread_info *thread)
452{ 432{
453 unsigned int cpu = get_cpu(); 433 unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
490 last_VFP_context[cpu] = NULL; 470 last_VFP_context[cpu] = NULL;
491 } 471 }
492 472
473#ifdef CONFIG_SMP
474 /*
475 * For SMP we still have to take care of the case where the thread
476 * migrates to another CPU and then back to the original CPU on which
477 * the last VFP user is still the same thread. Mark the thread VFP
478 * state as belonging to a non-existent CPU so that the saved one will
479 * be reloaded in the above case.
480 */
481 thread->vfpstate.hard.cpu = NR_CPUS;
482#endif
493 put_cpu(); 483 put_cpu();
494} 484}
495#endif
496 485
497#include <linux/smp.h> 486#include <linux/smp.h>
498 487
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index 88b7af20a996..d9d2ed647435 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
148static inline int atomic_sub_and_test(int i, atomic_t *v) 148static inline int atomic_sub_and_test(int i, atomic_t *v)
149{ 149{
150 char c; 150 char c;
151 __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i)); 151 __asm__ __volatile__("subl %2,%1; seq %0"
152 : "=d" (c), "+m" (*v)
153 : "id" (i));
152 return c != 0; 154 return c != 0;
153} 155}
154 156
155static inline int atomic_add_negative(int i, atomic_t *v) 157static inline int atomic_add_negative(int i, atomic_t *v)
156{ 158{
157 char c; 159 char c;
158 __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i)); 160 __asm__ __volatile__("addl %2,%1; smi %0"
161 : "=d" (c), "+m" (*v)
162 : "id" (i));
159 return c != 0; 163 return c != 0;
160} 164}
161 165
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index 1320eaa4cc2a..a29dd74a17cb 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -17,13 +17,11 @@ struct sigcontext {
17#ifndef __uClinux__ 17#ifndef __uClinux__
18# ifdef __mcoldfire__ 18# ifdef __mcoldfire__
19 unsigned long sc_fpregs[2][2]; /* room for two fp registers */ 19 unsigned long sc_fpregs[2][2]; /* room for two fp registers */
20 unsigned long sc_fpcntl[3];
21 unsigned char sc_fpstate[16+6*8];
22# else 20# else
23 unsigned long sc_fpregs[2*3]; /* room for two fp registers */ 21 unsigned long sc_fpregs[2*3]; /* room for two fp registers */
22# endif
24 unsigned long sc_fpcntl[3]; 23 unsigned long sc_fpcntl[3];
25 unsigned char sc_fpstate[216]; 24 unsigned char sc_fpstate[216];
26# endif
27#endif 25#endif
28}; 26};
29 27
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 379536e3abd1..be7e92ea01f3 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -60,43 +60,6 @@ void __init board_setup(void)
60 wmb(); 60 wmb();
61} 61}
62 62
63/* use the hexleds to count the number of times the cpu has entered
64 * wait, the dots to indicate whether the CPU is currently idle or
65 * active (dots off = sleeping, dots on = working) for cases where
66 * the number doesn't change for a long(er) period of time.
67 */
68static void db1200_wait(void)
69{
70 __asm__(" .set push \n"
71 " .set mips3 \n"
72 " .set noreorder \n"
73 " cache 0x14, 0(%0) \n"
74 " cache 0x14, 32(%0) \n"
75 " cache 0x14, 64(%0) \n"
76 /* dots off: we're about to call wait */
77 " lui $26, 0xb980 \n"
78 " ori $27, $0, 3 \n"
79 " sb $27, 0x18($26) \n"
80 " sync \n"
81 " nop \n"
82 " wait \n"
83 " nop \n"
84 " nop \n"
85 " nop \n"
86 " nop \n"
87 " nop \n"
88 /* dots on: there's work to do, increment cntr */
89 " lui $26, 0xb980 \n"
90 " sb $0, 0x18($26) \n"
91 " lui $26, 0xb9c0 \n"
92 " lb $27, 0($26) \n"
93 " addiu $27, $27, 1 \n"
94 " sb $27, 0($26) \n"
95 " sync \n"
96 " .set pop \n"
97 : : "r" (db1200_wait));
98}
99
100static int __init db1200_arch_init(void) 63static int __init db1200_arch_init(void)
101{ 64{
102 /* GPIO7 is low-level triggered CPLD cascade */ 65 /* GPIO7 is low-level triggered CPLD cascade */
@@ -110,9 +73,6 @@ static int __init db1200_arch_init(void)
110 irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; 73 irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN;
111 irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; 74 irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN;
112 75
113 if (cpu_wait)
114 cpu_wait = db1200_wait;
115
116 return 0; 76 return 0;
117} 77}
118arch_initcall(db1200_arch_init); 78arch_initcall(db1200_arch_init);
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 246df7aca2e7..2fafc78e5ce1 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -168,7 +168,7 @@ static struct plat_vlynq_data vlynq_high_data = {
168 .on = vlynq_on, 168 .on = vlynq_on,
169 .off = vlynq_off, 169 .off = vlynq_off,
170 }, 170 },
171 .reset_bit = 26, 171 .reset_bit = 16,
172 .gpio_bit = 19, 172 .gpio_bit = 19,
173}; 173};
174 174
@@ -600,6 +600,7 @@ static int __init ar7_register_devices(void)
600 } 600 }
601 601
602 if (ar7_has_high_cpmac()) { 602 if (ar7_has_high_cpmac()) {
603 res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status);
603 if (!res) { 604 if (!res) {
604 cpmac_get_mac(1, cpmac_high_data.dev_addr); 605 cpmac_get_mac(1, cpmac_high_data.dev_addr);
605 606
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ea17941168ca..8dba8cfb752f 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -18,6 +18,7 @@
18#include <asm/addrspace.h> 18#include <asm/addrspace.h>
19#include <bcm63xx_board.h> 19#include <bcm63xx_board.h>
20#include <bcm63xx_cpu.h> 20#include <bcm63xx_cpu.h>
21#include <bcm63xx_dev_uart.h>
21#include <bcm63xx_regs.h> 22#include <bcm63xx_regs.h>
22#include <bcm63xx_io.h> 23#include <bcm63xx_io.h>
23#include <bcm63xx_dev_pci.h> 24#include <bcm63xx_dev_pci.h>
@@ -40,6 +41,7 @@ static struct board_info __initdata board_96338gw = {
40 .name = "96338GW", 41 .name = "96338GW",
41 .expected_cpu_id = 0x6338, 42 .expected_cpu_id = 0x6338,
42 43
44 .has_uart0 = 1,
43 .has_enet0 = 1, 45 .has_enet0 = 1,
44 .enet0 = { 46 .enet0 = {
45 .force_speed_100 = 1, 47 .force_speed_100 = 1,
@@ -82,6 +84,7 @@ static struct board_info __initdata board_96338w = {
82 .name = "96338W", 84 .name = "96338W",
83 .expected_cpu_id = 0x6338, 85 .expected_cpu_id = 0x6338,
84 86
87 .has_uart0 = 1,
85 .has_enet0 = 1, 88 .has_enet0 = 1,
86 .enet0 = { 89 .enet0 = {
87 .force_speed_100 = 1, 90 .force_speed_100 = 1,
@@ -126,6 +129,8 @@ static struct board_info __initdata board_96338w = {
126static struct board_info __initdata board_96345gw2 = { 129static struct board_info __initdata board_96345gw2 = {
127 .name = "96345GW2", 130 .name = "96345GW2",
128 .expected_cpu_id = 0x6345, 131 .expected_cpu_id = 0x6345,
132
133 .has_uart0 = 1,
129}; 134};
130#endif 135#endif
131 136
@@ -137,6 +142,7 @@ static struct board_info __initdata board_96348r = {
137 .name = "96348R", 142 .name = "96348R",
138 .expected_cpu_id = 0x6348, 143 .expected_cpu_id = 0x6348,
139 144
145 .has_uart0 = 1,
140 .has_enet0 = 1, 146 .has_enet0 = 1,
141 .has_pci = 1, 147 .has_pci = 1,
142 148
@@ -180,6 +186,7 @@ static struct board_info __initdata board_96348gw_10 = {
180 .name = "96348GW-10", 186 .name = "96348GW-10",
181 .expected_cpu_id = 0x6348, 187 .expected_cpu_id = 0x6348,
182 188
189 .has_uart0 = 1,
183 .has_enet0 = 1, 190 .has_enet0 = 1,
184 .has_enet1 = 1, 191 .has_enet1 = 1,
185 .has_pci = 1, 192 .has_pci = 1,
@@ -239,6 +246,7 @@ static struct board_info __initdata board_96348gw_11 = {
239 .name = "96348GW-11", 246 .name = "96348GW-11",
240 .expected_cpu_id = 0x6348, 247 .expected_cpu_id = 0x6348,
241 248
249 .has_uart0 = 1,
242 .has_enet0 = 1, 250 .has_enet0 = 1,
243 .has_enet1 = 1, 251 .has_enet1 = 1,
244 .has_pci = 1, 252 .has_pci = 1,
@@ -292,6 +300,7 @@ static struct board_info __initdata board_96348gw = {
292 .name = "96348GW", 300 .name = "96348GW",
293 .expected_cpu_id = 0x6348, 301 .expected_cpu_id = 0x6348,
294 302
303 .has_uart0 = 1,
295 .has_enet0 = 1, 304 .has_enet0 = 1,
296 .has_enet1 = 1, 305 .has_enet1 = 1,
297 .has_pci = 1, 306 .has_pci = 1,
@@ -349,9 +358,10 @@ static struct board_info __initdata board_FAST2404 = {
349 .name = "F@ST2404", 358 .name = "F@ST2404",
350 .expected_cpu_id = 0x6348, 359 .expected_cpu_id = 0x6348,
351 360
352 .has_enet0 = 1, 361 .has_uart0 = 1,
353 .has_enet1 = 1, 362 .has_enet0 = 1,
354 .has_pci = 1, 363 .has_enet1 = 1,
364 .has_pci = 1,
355 365
356 .enet0 = { 366 .enet0 = {
357 .has_phy = 1, 367 .has_phy = 1,
@@ -368,10 +378,30 @@ static struct board_info __initdata board_FAST2404 = {
368 .has_ehci0 = 1, 378 .has_ehci0 = 1,
369}; 379};
370 380
381static struct board_info __initdata board_rta1025w_16 = {
382 .name = "RTA1025W_16",
383 .expected_cpu_id = 0x6348,
384
385 .has_enet0 = 1,
386 .has_enet1 = 1,
387 .has_pci = 1,
388
389 .enet0 = {
390 .has_phy = 1,
391 .use_internal_phy = 1,
392 },
393 .enet1 = {
394 .force_speed_100 = 1,
395 .force_duplex_full = 1,
396 },
397};
398
399
371static struct board_info __initdata board_DV201AMR = { 400static struct board_info __initdata board_DV201AMR = {
372 .name = "DV201AMR", 401 .name = "DV201AMR",
373 .expected_cpu_id = 0x6348, 402 .expected_cpu_id = 0x6348,
374 403
404 .has_uart0 = 1,
375 .has_pci = 1, 405 .has_pci = 1,
376 .has_ohci0 = 1, 406 .has_ohci0 = 1,
377 407
@@ -391,6 +421,7 @@ static struct board_info __initdata board_96348gw_a = {
391 .name = "96348GW-A", 421 .name = "96348GW-A",
392 .expected_cpu_id = 0x6348, 422 .expected_cpu_id = 0x6348,
393 423
424 .has_uart0 = 1,
394 .has_enet0 = 1, 425 .has_enet0 = 1,
395 .has_enet1 = 1, 426 .has_enet1 = 1,
396 .has_pci = 1, 427 .has_pci = 1,
@@ -416,6 +447,7 @@ static struct board_info __initdata board_96358vw = {
416 .name = "96358VW", 447 .name = "96358VW",
417 .expected_cpu_id = 0x6358, 448 .expected_cpu_id = 0x6358,
418 449
450 .has_uart0 = 1,
419 .has_enet0 = 1, 451 .has_enet0 = 1,
420 .has_enet1 = 1, 452 .has_enet1 = 1,
421 .has_pci = 1, 453 .has_pci = 1,
@@ -467,6 +499,7 @@ static struct board_info __initdata board_96358vw2 = {
467 .name = "96358VW2", 499 .name = "96358VW2",
468 .expected_cpu_id = 0x6358, 500 .expected_cpu_id = 0x6358,
469 501
502 .has_uart0 = 1,
470 .has_enet0 = 1, 503 .has_enet0 = 1,
471 .has_enet1 = 1, 504 .has_enet1 = 1,
472 .has_pci = 1, 505 .has_pci = 1,
@@ -514,6 +547,7 @@ static struct board_info __initdata board_AGPFS0 = {
514 .name = "AGPF-S0", 547 .name = "AGPF-S0",
515 .expected_cpu_id = 0x6358, 548 .expected_cpu_id = 0x6358,
516 549
550 .has_uart0 = 1,
517 .has_enet0 = 1, 551 .has_enet0 = 1,
518 .has_enet1 = 1, 552 .has_enet1 = 1,
519 .has_pci = 1, 553 .has_pci = 1,
@@ -531,6 +565,27 @@ static struct board_info __initdata board_AGPFS0 = {
531 .has_ohci0 = 1, 565 .has_ohci0 = 1,
532 .has_ehci0 = 1, 566 .has_ehci0 = 1,
533}; 567};
568
569static struct board_info __initdata board_DWVS0 = {
570 .name = "DWV-S0",
571 .expected_cpu_id = 0x6358,
572
573 .has_enet0 = 1,
574 .has_enet1 = 1,
575 .has_pci = 1,
576
577 .enet0 = {
578 .has_phy = 1,
579 .use_internal_phy = 1,
580 },
581
582 .enet1 = {
583 .force_speed_100 = 1,
584 .force_duplex_full = 1,
585 },
586
587 .has_ohci0 = 1,
588};
534#endif 589#endif
535 590
536/* 591/*
@@ -552,16 +607,88 @@ static const struct board_info __initdata *bcm963xx_boards[] = {
552 &board_FAST2404, 607 &board_FAST2404,
553 &board_DV201AMR, 608 &board_DV201AMR,
554 &board_96348gw_a, 609 &board_96348gw_a,
610 &board_rta1025w_16,
555#endif 611#endif
556 612
557#ifdef CONFIG_BCM63XX_CPU_6358 613#ifdef CONFIG_BCM63XX_CPU_6358
558 &board_96358vw, 614 &board_96358vw,
559 &board_96358vw2, 615 &board_96358vw2,
560 &board_AGPFS0, 616 &board_AGPFS0,
617 &board_DWVS0,
561#endif 618#endif
562}; 619};
563 620
564/* 621/*
622 * Register a sane SPROMv2 to make the on-board
623 * bcm4318 WLAN work
624 */
625#ifdef CONFIG_SSB_PCIHOST
626static struct ssb_sprom bcm63xx_sprom = {
627 .revision = 0x02,
628 .board_rev = 0x17,
629 .country_code = 0x0,
630 .ant_available_bg = 0x3,
631 .pa0b0 = 0x15ae,
632 .pa0b1 = 0xfa85,
633 .pa0b2 = 0xfe8d,
634 .pa1b0 = 0xffff,
635 .pa1b1 = 0xffff,
636 .pa1b2 = 0xffff,
637 .gpio0 = 0xff,
638 .gpio1 = 0xff,
639 .gpio2 = 0xff,
640 .gpio3 = 0xff,
641 .maxpwr_bg = 0x004c,
642 .itssi_bg = 0x00,
643 .boardflags_lo = 0x2848,
644 .boardflags_hi = 0x0000,
645};
646#endif
647
648/*
649 * return board name for /proc/cpuinfo
650 */
651const char *board_get_name(void)
652{
653 return board.name;
654}
655
656/*
657 * register & return a new board mac address
658 */
659static int board_get_mac_address(u8 *mac)
660{
661 u8 *p;
662 int count;
663
664 if (mac_addr_used >= nvram.mac_addr_count) {
665 printk(KERN_ERR PFX "not enough mac address\n");
666 return -ENODEV;
667 }
668
669 memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
670 p = mac + ETH_ALEN - 1;
671 count = mac_addr_used;
672
673 while (count--) {
674 do {
675 (*p)++;
676 if (*p != 0)
677 break;
678 p--;
679 } while (p != mac);
680 }
681
682 if (p == mac) {
683 printk(KERN_ERR PFX "unable to fetch mac address\n");
684 return -ENODEV;
685 }
686
687 mac_addr_used++;
688 return 0;
689}
690
691/*
565 * early init callback, read nvram data from flash and checksum it 692 * early init callback, read nvram data from flash and checksum it
566 */ 693 */
567void __init board_prom_init(void) 694void __init board_prom_init(void)
@@ -659,6 +786,17 @@ void __init board_prom_init(void)
659 } 786 }
660 787
661 bcm_gpio_writel(val, GPIO_MODE_REG); 788 bcm_gpio_writel(val, GPIO_MODE_REG);
789
790 /* Generate MAC address for WLAN and
791 * register our SPROM */
792#ifdef CONFIG_SSB_PCIHOST
793 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
794 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
795 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
796 if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0)
797 printk(KERN_ERR "failed to register fallback SPROM\n");
798 }
799#endif
662} 800}
663 801
664/* 802/*
@@ -676,49 +814,6 @@ void __init board_setup(void)
676 panic("unexpected CPU for bcm963xx board"); 814 panic("unexpected CPU for bcm963xx board");
677} 815}
678 816
679/*
680 * return board name for /proc/cpuinfo
681 */
682const char *board_get_name(void)
683{
684 return board.name;
685}
686
687/*
688 * register & return a new board mac address
689 */
690static int board_get_mac_address(u8 *mac)
691{
692 u8 *p;
693 int count;
694
695 if (mac_addr_used >= nvram.mac_addr_count) {
696 printk(KERN_ERR PFX "not enough mac address\n");
697 return -ENODEV;
698 }
699
700 memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
701 p = mac + ETH_ALEN - 1;
702 count = mac_addr_used;
703
704 while (count--) {
705 do {
706 (*p)++;
707 if (*p != 0)
708 break;
709 p--;
710 } while (p != mac);
711 }
712
713 if (p == mac) {
714 printk(KERN_ERR PFX "unable to fetch mac address\n");
715 return -ENODEV;
716 }
717
718 mac_addr_used++;
719 return 0;
720}
721
722static struct mtd_partition mtd_partitions[] = { 817static struct mtd_partition mtd_partitions[] = {
723 { 818 {
724 .name = "cfe", 819 .name = "cfe",
@@ -750,33 +845,6 @@ static struct platform_device mtd_dev = {
750 }, 845 },
751}; 846};
752 847
753/*
754 * Register a sane SPROMv2 to make the on-board
755 * bcm4318 WLAN work
756 */
757#ifdef CONFIG_SSB_PCIHOST
758static struct ssb_sprom bcm63xx_sprom = {
759 .revision = 0x02,
760 .board_rev = 0x17,
761 .country_code = 0x0,
762 .ant_available_bg = 0x3,
763 .pa0b0 = 0x15ae,
764 .pa0b1 = 0xfa85,
765 .pa0b2 = 0xfe8d,
766 .pa1b0 = 0xffff,
767 .pa1b1 = 0xffff,
768 .pa1b2 = 0xffff,
769 .gpio0 = 0xff,
770 .gpio1 = 0xff,
771 .gpio2 = 0xff,
772 .gpio3 = 0xff,
773 .maxpwr_bg = 0x004c,
774 .itssi_bg = 0x00,
775 .boardflags_lo = 0x2848,
776 .boardflags_hi = 0x0000,
777};
778#endif
779
780static struct gpio_led_platform_data bcm63xx_led_data; 848static struct gpio_led_platform_data bcm63xx_led_data;
781 849
782static struct platform_device bcm63xx_gpio_leds = { 850static struct platform_device bcm63xx_gpio_leds = {
@@ -792,6 +860,12 @@ int __init board_register_devices(void)
792{ 860{
793 u32 val; 861 u32 val;
794 862
863 if (board.has_uart0)
864 bcm63xx_uart_register(0);
865
866 if (board.has_uart1)
867 bcm63xx_uart_register(1);
868
795 if (board.has_pccard) 869 if (board.has_pccard)
796 bcm63xx_pcmcia_register(); 870 bcm63xx_pcmcia_register();
797 871
@@ -806,17 +880,6 @@ int __init board_register_devices(void)
806 if (board.has_dsp) 880 if (board.has_dsp)
807 bcm63xx_dsp_register(&board.dsp); 881 bcm63xx_dsp_register(&board.dsp);
808 882
809 /* Generate MAC address for WLAN and
810 * register our SPROM */
811#ifdef CONFIG_SSB_PCIHOST
812 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
813 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
814 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
815 if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0)
816 printk(KERN_ERR "failed to register fallback SPROM\n");
817 }
818#endif
819
820 /* read base address of boot chip select (0) */ 883 /* read base address of boot chip select (0) */
821 if (BCMCPU_IS_6345()) 884 if (BCMCPU_IS_6345())
822 val = 0x1fc00000; 885 val = 0x1fc00000;
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 70378bb5e3f9..cbb7caf86d77 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -36,6 +36,7 @@ static const unsigned long bcm96338_regs_base[] = {
36 [RSET_TIMER] = BCM_6338_TIMER_BASE, 36 [RSET_TIMER] = BCM_6338_TIMER_BASE,
37 [RSET_WDT] = BCM_6338_WDT_BASE, 37 [RSET_WDT] = BCM_6338_WDT_BASE,
38 [RSET_UART0] = BCM_6338_UART0_BASE, 38 [RSET_UART0] = BCM_6338_UART0_BASE,
39 [RSET_UART1] = BCM_6338_UART1_BASE,
39 [RSET_GPIO] = BCM_6338_GPIO_BASE, 40 [RSET_GPIO] = BCM_6338_GPIO_BASE,
40 [RSET_SPI] = BCM_6338_SPI_BASE, 41 [RSET_SPI] = BCM_6338_SPI_BASE,
41 [RSET_OHCI0] = BCM_6338_OHCI0_BASE, 42 [RSET_OHCI0] = BCM_6338_OHCI0_BASE,
@@ -72,6 +73,7 @@ static const unsigned long bcm96345_regs_base[] = {
72 [RSET_TIMER] = BCM_6345_TIMER_BASE, 73 [RSET_TIMER] = BCM_6345_TIMER_BASE,
73 [RSET_WDT] = BCM_6345_WDT_BASE, 74 [RSET_WDT] = BCM_6345_WDT_BASE,
74 [RSET_UART0] = BCM_6345_UART0_BASE, 75 [RSET_UART0] = BCM_6345_UART0_BASE,
76 [RSET_UART1] = BCM_6345_UART1_BASE,
75 [RSET_GPIO] = BCM_6345_GPIO_BASE, 77 [RSET_GPIO] = BCM_6345_GPIO_BASE,
76 [RSET_SPI] = BCM_6345_SPI_BASE, 78 [RSET_SPI] = BCM_6345_SPI_BASE,
77 [RSET_UDC0] = BCM_6345_UDC0_BASE, 79 [RSET_UDC0] = BCM_6345_UDC0_BASE,
@@ -109,6 +111,7 @@ static const unsigned long bcm96348_regs_base[] = {
109 [RSET_TIMER] = BCM_6348_TIMER_BASE, 111 [RSET_TIMER] = BCM_6348_TIMER_BASE,
110 [RSET_WDT] = BCM_6348_WDT_BASE, 112 [RSET_WDT] = BCM_6348_WDT_BASE,
111 [RSET_UART0] = BCM_6348_UART0_BASE, 113 [RSET_UART0] = BCM_6348_UART0_BASE,
114 [RSET_UART1] = BCM_6348_UART1_BASE,
112 [RSET_GPIO] = BCM_6348_GPIO_BASE, 115 [RSET_GPIO] = BCM_6348_GPIO_BASE,
113 [RSET_SPI] = BCM_6348_SPI_BASE, 116 [RSET_SPI] = BCM_6348_SPI_BASE,
114 [RSET_OHCI0] = BCM_6348_OHCI0_BASE, 117 [RSET_OHCI0] = BCM_6348_OHCI0_BASE,
@@ -150,6 +153,7 @@ static const unsigned long bcm96358_regs_base[] = {
150 [RSET_TIMER] = BCM_6358_TIMER_BASE, 153 [RSET_TIMER] = BCM_6358_TIMER_BASE,
151 [RSET_WDT] = BCM_6358_WDT_BASE, 154 [RSET_WDT] = BCM_6358_WDT_BASE,
152 [RSET_UART0] = BCM_6358_UART0_BASE, 155 [RSET_UART0] = BCM_6358_UART0_BASE,
156 [RSET_UART1] = BCM_6358_UART1_BASE,
153 [RSET_GPIO] = BCM_6358_GPIO_BASE, 157 [RSET_GPIO] = BCM_6358_GPIO_BASE,
154 [RSET_SPI] = BCM_6358_SPI_BASE, 158 [RSET_SPI] = BCM_6358_SPI_BASE,
155 [RSET_OHCI0] = BCM_6358_OHCI0_BASE, 159 [RSET_OHCI0] = BCM_6358_OHCI0_BASE,
@@ -170,6 +174,7 @@ static const unsigned long bcm96358_regs_base[] = {
170static const int bcm96358_irqs[] = { 174static const int bcm96358_irqs[] = {
171 [IRQ_TIMER] = BCM_6358_TIMER_IRQ, 175 [IRQ_TIMER] = BCM_6358_TIMER_IRQ,
172 [IRQ_UART0] = BCM_6358_UART0_IRQ, 176 [IRQ_UART0] = BCM_6358_UART0_IRQ,
177 [IRQ_UART1] = BCM_6358_UART1_IRQ,
173 [IRQ_DSL] = BCM_6358_DSL_IRQ, 178 [IRQ_DSL] = BCM_6358_DSL_IRQ,
174 [IRQ_ENET0] = BCM_6358_ENET0_IRQ, 179 [IRQ_ENET0] = BCM_6358_ENET0_IRQ,
175 [IRQ_ENET1] = BCM_6358_ENET1_IRQ, 180 [IRQ_ENET1] = BCM_6358_ENET1_IRQ,
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
index b0519461ad9b..c2963da0253e 100644
--- a/arch/mips/bcm63xx/dev-uart.c
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -11,31 +11,65 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <bcm63xx_cpu.h> 12#include <bcm63xx_cpu.h>
13 13
14static struct resource uart_resources[] = { 14static struct resource uart0_resources[] = {
15 { 15 {
16 .start = -1, /* filled at runtime */ 16 /* start & end filled at runtime */
17 .end = -1, /* filled at runtime */
18 .flags = IORESOURCE_MEM, 17 .flags = IORESOURCE_MEM,
19 }, 18 },
20 { 19 {
21 .start = -1, /* filled at runtime */ 20 /* start filled at runtime */
22 .flags = IORESOURCE_IRQ, 21 .flags = IORESOURCE_IRQ,
23 }, 22 },
24}; 23};
25 24
26static struct platform_device bcm63xx_uart_device = { 25static struct resource uart1_resources[] = {
27 .name = "bcm63xx_uart", 26 {
28 .id = 0, 27 /* start & end filled at runtime */
29 .num_resources = ARRAY_SIZE(uart_resources), 28 .flags = IORESOURCE_MEM,
30 .resource = uart_resources, 29 },
30 {
31 /* start filled at runtime */
32 .flags = IORESOURCE_IRQ,
33 },
34};
35
36static struct platform_device bcm63xx_uart_devices[] = {
37 {
38 .name = "bcm63xx_uart",
39 .id = 0,
40 .num_resources = ARRAY_SIZE(uart0_resources),
41 .resource = uart0_resources,
42 },
43
44 {
45 .name = "bcm63xx_uart",
46 .id = 1,
47 .num_resources = ARRAY_SIZE(uart1_resources),
48 .resource = uart1_resources,
49 }
31}; 50};
32 51
33int __init bcm63xx_uart_register(void) 52int __init bcm63xx_uart_register(unsigned int id)
34{ 53{
35 uart_resources[0].start = bcm63xx_regset_address(RSET_UART0); 54 if (id >= ARRAY_SIZE(bcm63xx_uart_devices))
36 uart_resources[0].end = uart_resources[0].start; 55 return -ENODEV;
37 uart_resources[0].end += RSET_UART_SIZE - 1; 56
38 uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); 57 if (id == 1 && !BCMCPU_IS_6358())
39 return platform_device_register(&bcm63xx_uart_device); 58 return -ENODEV;
59
60 if (id == 0) {
61 uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0);
62 uart0_resources[0].end = uart0_resources[0].start +
63 RSET_UART_SIZE - 1;
64 uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
65 }
66
67 if (id == 1) {
68 uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1);
69 uart1_resources[0].end = uart1_resources[0].start +
70 RSET_UART_SIZE - 1;
71 uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1);
72 }
73
74 return platform_device_register(&bcm63xx_uart_devices[id]);
40} 75}
41arch_initcall(bcm63xx_uart_register);
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 87ca39046334..315bc7f79ce1 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -125,10 +125,10 @@ static struct gpio_chip bcm63xx_gpio_chip = {
125 125
126int __init bcm63xx_gpio_init(void) 126int __init bcm63xx_gpio_init(void)
127{ 127{
128 gpio_out_low = bcm_gpio_readl(GPIO_DATA_LO_REG);
129 gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG);
128 bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count(); 130 bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
129 pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio); 131 pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);
130 132
131 return gpiochip_add(&bcm63xx_gpio_chip); 133 return gpiochip_add(&bcm63xx_gpio_chip);
132} 134}
133
134arch_initcall(bcm63xx_gpio_init);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b321d3b16877..9a06fa9f9f0c 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -45,9 +45,6 @@ extern struct plat_smp_ops octeon_smp_ops;
45extern void pci_console_init(const char *arg); 45extern void pci_console_init(const char *arg);
46#endif 46#endif
47 47
48#ifdef CONFIG_CAVIUM_RESERVE32
49extern uint64_t octeon_reserve32_memory;
50#endif
51static unsigned long long MAX_MEMORY = 512ull << 20; 48static unsigned long long MAX_MEMORY = 512ull << 20;
52 49
53struct octeon_boot_descriptor *octeon_boot_desc_ptr; 50struct octeon_boot_descriptor *octeon_boot_desc_ptr;
@@ -186,54 +183,6 @@ void octeon_check_cpu_bist(void)
186 write_octeon_c0_dcacheerr(0); 183 write_octeon_c0_dcacheerr(0);
187} 184}
188 185
189#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
190/**
191 * Called on every core to setup the wired tlb entry needed
192 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
193 *
194 */
195static void octeon_hal_setup_per_cpu_reserved32(void *unused)
196{
197 /*
198 * The config has selected to wire the reserve32 memory for all
199 * userspace applications. We need to put a wired TLB entry in for each
200 * 512MB of reserve32 memory. We only handle double 256MB pages here,
201 * so reserve32 must be multiple of 512MB.
202 */
203 uint32_t size = CONFIG_CAVIUM_RESERVE32;
204 uint32_t entrylo0 =
205 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
206 uint32_t entrylo1 = entrylo0 + (256 << 14);
207 uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
208 while (size >= 512) {
209#if 0
210 pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
211 smp_processor_id(), entryhi);
212#endif
213 add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
214 entrylo0 += 512 << 14;
215 entrylo1 += 512 << 14;
216 entryhi += 512 << 20;
217 size -= 512;
218 }
219}
220#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
221
222/**
223 * Called to release the named block which was used to made sure
224 * that nobody used the memory for something else during
225 * init. Now we'll free it so userspace apps can use this
226 * memory region with bootmem_alloc.
227 *
228 * This function is called only once from prom_free_prom_memory().
229 */
230void octeon_hal_setup_reserved32(void)
231{
232#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
233 on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
234#endif
235}
236
237/** 186/**
238 * Reboot Octeon 187 * Reboot Octeon
239 * 188 *
@@ -294,18 +243,6 @@ static void octeon_halt(void)
294 octeon_kill_core(NULL); 243 octeon_kill_core(NULL);
295} 244}
296 245
297#if 0
298/**
299 * Platform time init specifics.
300 * Returns
301 */
302void __init plat_time_init(void)
303{
304 /* Nothing special here, but we are required to have one */
305}
306
307#endif
308
309/** 246/**
310 * Handle all the error condition interrupts that might occur. 247 * Handle all the error condition interrupts that might occur.
311 * 248 *
@@ -502,25 +439,13 @@ void __init prom_init(void)
502 * memory when it is getting memory from the 439 * memory when it is getting memory from the
503 * bootloader. Later, after the memory allocations are 440 * bootloader. Later, after the memory allocations are
504 * complete, the reserve32 will be freed. 441 * complete, the reserve32 will be freed.
505 */ 442 *
506#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
507 if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
508 pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
509 "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
510 "is set\n");
511 else
512 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
513 0, 0, 512 << 20,
514 "CAVIUM_RESERVE32", 0);
515#else
516 /*
517 * Allocate memory for RESERVED32 aligned on 2MB boundary. This 443 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
518 * is in case we later use hugetlb entries with it. 444 * is in case we later use hugetlb entries with it.
519 */ 445 */
520 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, 446 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
521 0, 0, 2 << 20, 447 0, 0, 2 << 20,
522 "CAVIUM_RESERVE32", 0); 448 "CAVIUM_RESERVE32", 0);
523#endif
524 if (addr < 0) 449 if (addr < 0)
525 pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); 450 pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
526 else 451 else
@@ -817,9 +742,4 @@ void prom_free_prom_memory(void)
817 panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); 742 panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
818 } 743 }
819#endif 744#endif
820
821 /* This call is here so that it is performed after any TLB
822 initializations. It needs to be after these in case the
823 CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
824 octeon_hal_setup_reserved32();
825} 745}
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 51e980290ce1..6d99b9d8887d 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -279,14 +279,6 @@ static void octeon_cpu_die(unsigned int cpu)
279 uint32_t avail_coremask; 279 uint32_t avail_coremask;
280 struct cvmx_bootmem_named_block_desc *block_desc; 280 struct cvmx_bootmem_named_block_desc *block_desc;
281 281
282#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
283 /* Disable the watchdog */
284 cvmx_ciu_wdogx_t ciu_wdog;
285 ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
286 ciu_wdog.s.mode = 0;
287 cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
288#endif
289
290 while (per_cpu(cpu_state, cpu) != CPU_DEAD) 282 while (per_cpu(cpu_state, cpu) != CPU_DEAD)
291 cpu_relax(); 283 cpu_relax();
292 284
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index c2f06e38c854..0583bb29150f 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc8 3# Linux kernel version: 2.6.34-rc3
4# Wed Jul 2 17:02:55 2008 4# Sat Apr 3 16:32:11 2010
5# 5#
6CONFIG_MIPS=y 6CONFIG_MIPS=y
7 7
@@ -9,20 +9,25 @@ CONFIG_MIPS=y
9# Machine selection 9# Machine selection
10# 10#
11# CONFIG_MACH_ALCHEMY is not set 11# CONFIG_MACH_ALCHEMY is not set
12# CONFIG_AR7 is not set
12# CONFIG_BCM47XX is not set 13# CONFIG_BCM47XX is not set
14# CONFIG_BCM63XX is not set
13# CONFIG_MIPS_COBALT is not set 15# CONFIG_MIPS_COBALT is not set
14# CONFIG_MACH_DECSTATION is not set 16# CONFIG_MACH_DECSTATION is not set
15# CONFIG_MACH_JAZZ is not set 17# CONFIG_MACH_JAZZ is not set
16# CONFIG_LASAT is not set 18# CONFIG_LASAT is not set
17# CONFIG_LEMOTE_FULONG is not set 19# CONFIG_MACH_LOONGSON is not set
18# CONFIG_MIPS_MALTA is not set 20# CONFIG_MIPS_MALTA is not set
19# CONFIG_MIPS_SIM is not set 21# CONFIG_MIPS_SIM is not set
20# CONFIG_MARKEINS is not set 22# CONFIG_NEC_MARKEINS is not set
21# CONFIG_MACH_VR41XX is not set 23# CONFIG_MACH_VR41XX is not set
24# CONFIG_NXP_STB220 is not set
25# CONFIG_NXP_STB225 is not set
22# CONFIG_PNX8550_JBS is not set 26# CONFIG_PNX8550_JBS is not set
23# CONFIG_PNX8550_STB810 is not set 27# CONFIG_PNX8550_STB810 is not set
24# CONFIG_PMC_MSP is not set 28# CONFIG_PMC_MSP is not set
25# CONFIG_PMC_YOSEMITE is not set 29# CONFIG_PMC_YOSEMITE is not set
30# CONFIG_POWERTV is not set
26# CONFIG_SGI_IP22 is not set 31# CONFIG_SGI_IP22 is not set
27# CONFIG_SGI_IP27 is not set 32# CONFIG_SGI_IP27 is not set
28# CONFIG_SGI_IP28 is not set 33# CONFIG_SGI_IP28 is not set
@@ -36,10 +41,13 @@ CONFIG_MIPS=y
36# CONFIG_SIBYTE_SENTOSA is not set 41# CONFIG_SIBYTE_SENTOSA is not set
37CONFIG_SIBYTE_BIGSUR=y 42CONFIG_SIBYTE_BIGSUR=y
38# CONFIG_SNI_RM is not set 43# CONFIG_SNI_RM is not set
39# CONFIG_TOSHIBA_JMR3927 is not set 44# CONFIG_MACH_TX39XX is not set
40# CONFIG_TOSHIBA_RBTX4927 is not set 45# CONFIG_MACH_TX49XX is not set
41# CONFIG_TOSHIBA_RBTX4938 is not set 46# CONFIG_MIKROTIK_RB532 is not set
42# CONFIG_WR_PPMC is not set 47# CONFIG_WR_PPMC is not set
48# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
49# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
50# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
43CONFIG_SIBYTE_BCM1x80=y 51CONFIG_SIBYTE_BCM1x80=y
44CONFIG_SIBYTE_SB1xxx_SOC=y 52CONFIG_SIBYTE_SB1xxx_SOC=y
45# CONFIG_CPU_SB1_PASS_1 is not set 53# CONFIG_CPU_SB1_PASS_1 is not set
@@ -48,14 +56,13 @@ CONFIG_SIBYTE_SB1xxx_SOC=y
48# CONFIG_CPU_SB1_PASS_4 is not set 56# CONFIG_CPU_SB1_PASS_4 is not set
49# CONFIG_CPU_SB1_PASS_2_112x is not set 57# CONFIG_CPU_SB1_PASS_2_112x is not set
50# CONFIG_CPU_SB1_PASS_3 is not set 58# CONFIG_CPU_SB1_PASS_3 is not set
51# CONFIG_SIMULATION is not set
52# CONFIG_SB1_CEX_ALWAYS_FATAL is not set 59# CONFIG_SB1_CEX_ALWAYS_FATAL is not set
53# CONFIG_SB1_CERR_STALL is not set 60# CONFIG_SB1_CERR_STALL is not set
54CONFIG_SIBYTE_CFE=y
55# CONFIG_SIBYTE_CFE_CONSOLE is not set 61# CONFIG_SIBYTE_CFE_CONSOLE is not set
56# CONFIG_SIBYTE_BUS_WATCHER is not set 62# CONFIG_SIBYTE_BUS_WATCHER is not set
57# CONFIG_SIBYTE_TBPROF is not set 63# CONFIG_SIBYTE_TBPROF is not set
58CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y 64CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y
65CONFIG_LOONGSON_UART_BASE=y
59CONFIG_RWSEM_GENERIC_SPINLOCK=y 66CONFIG_RWSEM_GENERIC_SPINLOCK=y
60# CONFIG_ARCH_HAS_ILOG2_U32 is not set 67# CONFIG_ARCH_HAS_ILOG2_U32 is not set
61# CONFIG_ARCH_HAS_ILOG2_U64 is not set 68# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -66,15 +73,13 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
66CONFIG_GENERIC_CLOCKEVENTS=y 73CONFIG_GENERIC_CLOCKEVENTS=y
67CONFIG_GENERIC_TIME=y 74CONFIG_GENERIC_TIME=y
68CONFIG_GENERIC_CMOS_UPDATE=y 75CONFIG_GENERIC_CMOS_UPDATE=y
69CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 76CONFIG_SCHED_OMIT_FRAME_POINTER=y
70# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set 77CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
71CONFIG_CEVT_BCM1480=y 78CONFIG_CEVT_BCM1480=y
72CONFIG_CSRC_BCM1480=y 79CONFIG_CSRC_BCM1480=y
73CONFIG_CFE=y 80CONFIG_CFE=y
74CONFIG_DMA_COHERENT=y 81CONFIG_DMA_COHERENT=y
75CONFIG_EARLY_PRINTK=y
76CONFIG_SYS_HAS_EARLY_PRINTK=y 82CONFIG_SYS_HAS_EARLY_PRINTK=y
77# CONFIG_HOTPLUG_CPU is not set
78# CONFIG_NO_IOPORT is not set 83# CONFIG_NO_IOPORT is not set
79CONFIG_CPU_BIG_ENDIAN=y 84CONFIG_CPU_BIG_ENDIAN=y
80# CONFIG_CPU_LITTLE_ENDIAN is not set 85# CONFIG_CPU_LITTLE_ENDIAN is not set
@@ -88,7 +93,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
88# 93#
89# CPU selection 94# CPU selection
90# 95#
91# CONFIG_CPU_LOONGSON2 is not set 96# CONFIG_CPU_LOONGSON2E is not set
97# CONFIG_CPU_LOONGSON2F is not set
92# CONFIG_CPU_MIPS32_R1 is not set 98# CONFIG_CPU_MIPS32_R1 is not set
93# CONFIG_CPU_MIPS32_R2 is not set 99# CONFIG_CPU_MIPS32_R2 is not set
94# CONFIG_CPU_MIPS64_R1 is not set 100# CONFIG_CPU_MIPS64_R1 is not set
@@ -101,6 +107,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
101# CONFIG_CPU_TX49XX is not set 107# CONFIG_CPU_TX49XX is not set
102# CONFIG_CPU_R5000 is not set 108# CONFIG_CPU_R5000 is not set
103# CONFIG_CPU_R5432 is not set 109# CONFIG_CPU_R5432 is not set
110# CONFIG_CPU_R5500 is not set
104# CONFIG_CPU_R6000 is not set 111# CONFIG_CPU_R6000 is not set
105# CONFIG_CPU_NEVADA is not set 112# CONFIG_CPU_NEVADA is not set
106# CONFIG_CPU_R8000 is not set 113# CONFIG_CPU_R8000 is not set
@@ -108,6 +115,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
108# CONFIG_CPU_RM7000 is not set 115# CONFIG_CPU_RM7000 is not set
109# CONFIG_CPU_RM9000 is not set 116# CONFIG_CPU_RM9000 is not set
110CONFIG_CPU_SB1=y 117CONFIG_CPU_SB1=y
118# CONFIG_CPU_CAVIUM_OCTEON is not set
111CONFIG_SYS_HAS_CPU_SB1=y 119CONFIG_SYS_HAS_CPU_SB1=y
112CONFIG_WEAK_ORDERING=y 120CONFIG_WEAK_ORDERING=y
113CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y 121CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
@@ -123,11 +131,13 @@ CONFIG_64BIT=y
123CONFIG_PAGE_SIZE_4KB=y 131CONFIG_PAGE_SIZE_4KB=y
124# CONFIG_PAGE_SIZE_8KB is not set 132# CONFIG_PAGE_SIZE_8KB is not set
125# CONFIG_PAGE_SIZE_16KB is not set 133# CONFIG_PAGE_SIZE_16KB is not set
134# CONFIG_PAGE_SIZE_32KB is not set
126# CONFIG_PAGE_SIZE_64KB is not set 135# CONFIG_PAGE_SIZE_64KB is not set
127# CONFIG_SIBYTE_DMA_PAGEOPS is not set 136# CONFIG_SIBYTE_DMA_PAGEOPS is not set
128CONFIG_MIPS_MT_DISABLED=y 137CONFIG_MIPS_MT_DISABLED=y
129# CONFIG_MIPS_MT_SMP is not set 138# CONFIG_MIPS_MT_SMP is not set
130# CONFIG_MIPS_MT_SMTC is not set 139# CONFIG_MIPS_MT_SMTC is not set
140# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
131CONFIG_CPU_HAS_SYNC=y 141CONFIG_CPU_HAS_SYNC=y
132CONFIG_GENERIC_HARDIRQS=y 142CONFIG_GENERIC_HARDIRQS=y
133CONFIG_GENERIC_IRQ_PROBE=y 143CONFIG_GENERIC_IRQ_PROBE=y
@@ -142,18 +152,17 @@ CONFIG_FLATMEM_MANUAL=y
142# CONFIG_SPARSEMEM_MANUAL is not set 152# CONFIG_SPARSEMEM_MANUAL is not set
143CONFIG_FLATMEM=y 153CONFIG_FLATMEM=y
144CONFIG_FLAT_NODE_MEM_MAP=y 154CONFIG_FLAT_NODE_MEM_MAP=y
145# CONFIG_SPARSEMEM_STATIC is not set
146# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
147CONFIG_PAGEFLAGS_EXTENDED=y 155CONFIG_PAGEFLAGS_EXTENDED=y
148CONFIG_SPLIT_PTLOCK_CPUS=4 156CONFIG_SPLIT_PTLOCK_CPUS=4
149CONFIG_RESOURCES_64BIT=y 157CONFIG_PHYS_ADDR_T_64BIT=y
150CONFIG_ZONE_DMA_FLAG=0 158CONFIG_ZONE_DMA_FLAG=0
151CONFIG_VIRT_TO_BUS=y 159CONFIG_VIRT_TO_BUS=y
160# CONFIG_KSM is not set
161CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
152CONFIG_SMP=y 162CONFIG_SMP=y
153CONFIG_SYS_SUPPORTS_SMP=y 163CONFIG_SYS_SUPPORTS_SMP=y
154CONFIG_NR_CPUS_DEFAULT_4=y 164CONFIG_NR_CPUS_DEFAULT_4=y
155CONFIG_NR_CPUS=4 165CONFIG_NR_CPUS=4
156# CONFIG_MIPS_CMP is not set
157CONFIG_TICK_ONESHOT=y 166CONFIG_TICK_ONESHOT=y
158CONFIG_NO_HZ=y 167CONFIG_NO_HZ=y
159CONFIG_HIGH_RES_TIMERS=y 168CONFIG_HIGH_RES_TIMERS=y
@@ -175,6 +184,7 @@ CONFIG_SECCOMP=y
175CONFIG_LOCKDEP_SUPPORT=y 184CONFIG_LOCKDEP_SUPPORT=y
176CONFIG_STACKTRACE_SUPPORT=y 185CONFIG_STACKTRACE_SUPPORT=y
177CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 186CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
187CONFIG_CONSTRUCTORS=y
178 188
179# 189#
180# General setup 190# General setup
@@ -188,6 +198,7 @@ CONFIG_SWAP=y
188CONFIG_SYSVIPC=y 198CONFIG_SYSVIPC=y
189CONFIG_SYSVIPC_SYSCTL=y 199CONFIG_SYSVIPC_SYSCTL=y
190CONFIG_POSIX_MQUEUE=y 200CONFIG_POSIX_MQUEUE=y
201CONFIG_POSIX_MQUEUE_SYSCTL=y
191CONFIG_BSD_PROCESS_ACCT=y 202CONFIG_BSD_PROCESS_ACCT=y
192CONFIG_BSD_PROCESS_ACCT_V3=y 203CONFIG_BSD_PROCESS_ACCT_V3=y
193CONFIG_TASKSTATS=y 204CONFIG_TASKSTATS=y
@@ -195,23 +206,39 @@ CONFIG_TASK_DELAY_ACCT=y
195CONFIG_TASK_XACCT=y 206CONFIG_TASK_XACCT=y
196CONFIG_TASK_IO_ACCOUNTING=y 207CONFIG_TASK_IO_ACCOUNTING=y
197CONFIG_AUDIT=y 208CONFIG_AUDIT=y
209
210#
211# RCU Subsystem
212#
213CONFIG_TREE_RCU=y
214# CONFIG_TREE_PREEMPT_RCU is not set
215# CONFIG_TINY_RCU is not set
216# CONFIG_RCU_TRACE is not set
217CONFIG_RCU_FANOUT=64
218# CONFIG_RCU_FANOUT_EXACT is not set
219# CONFIG_RCU_FAST_NO_HZ is not set
220# CONFIG_TREE_RCU_TRACE is not set
198CONFIG_IKCONFIG=y 221CONFIG_IKCONFIG=y
199CONFIG_IKCONFIG_PROC=y 222CONFIG_IKCONFIG_PROC=y
200CONFIG_LOG_BUF_SHIFT=16 223CONFIG_LOG_BUF_SHIFT=16
201# CONFIG_CGROUPS is not set 224# CONFIG_CGROUPS is not set
202CONFIG_GROUP_SCHED=y 225# CONFIG_SYSFS_DEPRECATED_V2 is not set
203CONFIG_FAIR_GROUP_SCHED=y
204# CONFIG_RT_GROUP_SCHED is not set
205CONFIG_USER_SCHED=y
206# CONFIG_CGROUP_SCHED is not set
207CONFIG_SYSFS_DEPRECATED=y
208CONFIG_SYSFS_DEPRECATED_V2=y
209CONFIG_RELAY=y 226CONFIG_RELAY=y
210# CONFIG_NAMESPACES is not set 227CONFIG_NAMESPACES=y
228CONFIG_UTS_NS=y
229CONFIG_IPC_NS=y
230CONFIG_USER_NS=y
231CONFIG_PID_NS=y
232CONFIG_NET_NS=y
211CONFIG_BLK_DEV_INITRD=y 233CONFIG_BLK_DEV_INITRD=y
212CONFIG_INITRAMFS_SOURCE="" 234CONFIG_INITRAMFS_SOURCE=""
235CONFIG_RD_GZIP=y
236# CONFIG_RD_BZIP2 is not set
237# CONFIG_RD_LZMA is not set
238# CONFIG_RD_LZO is not set
213# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 239# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
214CONFIG_SYSCTL=y 240CONFIG_SYSCTL=y
241CONFIG_ANON_INODES=y
215CONFIG_EMBEDDED=y 242CONFIG_EMBEDDED=y
216# CONFIG_SYSCTL_SYSCALL is not set 243# CONFIG_SYSCTL_SYSCALL is not set
217CONFIG_KALLSYMS=y 244CONFIG_KALLSYMS=y
@@ -222,29 +249,36 @@ CONFIG_PRINTK=y
222CONFIG_BUG=y 249CONFIG_BUG=y
223CONFIG_ELF_CORE=y 250CONFIG_ELF_CORE=y
224# CONFIG_PCSPKR_PLATFORM is not set 251# CONFIG_PCSPKR_PLATFORM is not set
225CONFIG_COMPAT_BRK=y
226CONFIG_BASE_FULL=y 252CONFIG_BASE_FULL=y
227CONFIG_FUTEX=y 253CONFIG_FUTEX=y
228CONFIG_ANON_INODES=y
229CONFIG_EPOLL=y 254CONFIG_EPOLL=y
230CONFIG_SIGNALFD=y 255CONFIG_SIGNALFD=y
231CONFIG_TIMERFD=y 256CONFIG_TIMERFD=y
232CONFIG_EVENTFD=y 257CONFIG_EVENTFD=y
233CONFIG_SHMEM=y 258CONFIG_SHMEM=y
259CONFIG_AIO=y
260
261#
262# Kernel Performance Events And Counters
263#
234CONFIG_VM_EVENT_COUNTERS=y 264CONFIG_VM_EVENT_COUNTERS=y
265CONFIG_PCI_QUIRKS=y
266CONFIG_COMPAT_BRK=y
235CONFIG_SLAB=y 267CONFIG_SLAB=y
236# CONFIG_SLUB is not set 268# CONFIG_SLUB is not set
237# CONFIG_SLOB is not set 269# CONFIG_SLOB is not set
238# CONFIG_PROFILING is not set 270# CONFIG_PROFILING is not set
239# CONFIG_MARKERS is not set
240CONFIG_HAVE_OPROFILE=y 271CONFIG_HAVE_OPROFILE=y
241# CONFIG_HAVE_KPROBES is not set 272CONFIG_HAVE_SYSCALL_WRAPPERS=y
242# CONFIG_HAVE_KRETPROBES is not set 273CONFIG_USE_GENERIC_SMP_HELPERS=y
243# CONFIG_HAVE_DMA_ATTRS is not set 274
244CONFIG_PROC_PAGE_MONITOR=y 275#
276# GCOV-based kernel profiling
277#
278# CONFIG_SLOW_WORK is not set
279CONFIG_HAVE_GENERIC_DMA_COHERENT=y
245CONFIG_SLABINFO=y 280CONFIG_SLABINFO=y
246CONFIG_RT_MUTEXES=y 281CONFIG_RT_MUTEXES=y
247# CONFIG_TINY_SHMEM is not set
248CONFIG_BASE_SMALL=0 282CONFIG_BASE_SMALL=0
249CONFIG_MODULES=y 283CONFIG_MODULES=y
250# CONFIG_MODULE_FORCE_LOAD is not set 284# CONFIG_MODULE_FORCE_LOAD is not set
@@ -252,26 +286,52 @@ CONFIG_MODULE_UNLOAD=y
252# CONFIG_MODULE_FORCE_UNLOAD is not set 286# CONFIG_MODULE_FORCE_UNLOAD is not set
253CONFIG_MODVERSIONS=y 287CONFIG_MODVERSIONS=y
254CONFIG_MODULE_SRCVERSION_ALL=y 288CONFIG_MODULE_SRCVERSION_ALL=y
255CONFIG_KMOD=y
256CONFIG_STOP_MACHINE=y 289CONFIG_STOP_MACHINE=y
257CONFIG_BLOCK=y 290CONFIG_BLOCK=y
258# CONFIG_BLK_DEV_IO_TRACE is not set
259# CONFIG_BLK_DEV_BSG is not set 291# CONFIG_BLK_DEV_BSG is not set
292# CONFIG_BLK_DEV_INTEGRITY is not set
260CONFIG_BLOCK_COMPAT=y 293CONFIG_BLOCK_COMPAT=y
261 294
262# 295#
263# IO Schedulers 296# IO Schedulers
264# 297#
265CONFIG_IOSCHED_NOOP=y 298CONFIG_IOSCHED_NOOP=y
266CONFIG_IOSCHED_AS=y
267CONFIG_IOSCHED_DEADLINE=y 299CONFIG_IOSCHED_DEADLINE=y
268CONFIG_IOSCHED_CFQ=y 300CONFIG_IOSCHED_CFQ=y
269CONFIG_DEFAULT_AS=y
270# CONFIG_DEFAULT_DEADLINE is not set 301# CONFIG_DEFAULT_DEADLINE is not set
271# CONFIG_DEFAULT_CFQ is not set 302CONFIG_DEFAULT_CFQ=y
272# CONFIG_DEFAULT_NOOP is not set 303# CONFIG_DEFAULT_NOOP is not set
273CONFIG_DEFAULT_IOSCHED="anticipatory" 304CONFIG_DEFAULT_IOSCHED="cfq"
274CONFIG_CLASSIC_RCU=y 305# CONFIG_INLINE_SPIN_TRYLOCK is not set
306# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
307# CONFIG_INLINE_SPIN_LOCK is not set
308# CONFIG_INLINE_SPIN_LOCK_BH is not set
309# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
310# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
311CONFIG_INLINE_SPIN_UNLOCK=y
312# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
313CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
314# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
315# CONFIG_INLINE_READ_TRYLOCK is not set
316# CONFIG_INLINE_READ_LOCK is not set
317# CONFIG_INLINE_READ_LOCK_BH is not set
318# CONFIG_INLINE_READ_LOCK_IRQ is not set
319# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
320CONFIG_INLINE_READ_UNLOCK=y
321# CONFIG_INLINE_READ_UNLOCK_BH is not set
322CONFIG_INLINE_READ_UNLOCK_IRQ=y
323# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
324# CONFIG_INLINE_WRITE_TRYLOCK is not set
325# CONFIG_INLINE_WRITE_LOCK is not set
326# CONFIG_INLINE_WRITE_LOCK_BH is not set
327# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
328# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
329CONFIG_INLINE_WRITE_UNLOCK=y
330# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
331CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
332# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
333CONFIG_MUTEX_SPIN_ON_OWNER=y
334# CONFIG_FREEZER is not set
275 335
276# 336#
277# Bus options (PCI, PCMCIA, EISA, ISA, TC) 337# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -280,8 +340,9 @@ CONFIG_HW_HAS_PCI=y
280CONFIG_PCI=y 340CONFIG_PCI=y
281CONFIG_PCI_DOMAINS=y 341CONFIG_PCI_DOMAINS=y
282# CONFIG_ARCH_SUPPORTS_MSI is not set 342# CONFIG_ARCH_SUPPORTS_MSI is not set
283CONFIG_PCI_LEGACY=y
284CONFIG_PCI_DEBUG=y 343CONFIG_PCI_DEBUG=y
344# CONFIG_PCI_STUB is not set
345# CONFIG_PCI_IOV is not set
285CONFIG_MMU=y 346CONFIG_MMU=y
286CONFIG_ZONE_DMA32=y 347CONFIG_ZONE_DMA32=y
287# CONFIG_PCCARD is not set 348# CONFIG_PCCARD is not set
@@ -291,6 +352,8 @@ CONFIG_ZONE_DMA32=y
291# Executable file formats 352# Executable file formats
292# 353#
293CONFIG_BINFMT_ELF=y 354CONFIG_BINFMT_ELF=y
355# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
356# CONFIG_HAVE_AOUT is not set
294# CONFIG_BINFMT_MISC is not set 357# CONFIG_BINFMT_MISC is not set
295CONFIG_MIPS32_COMPAT=y 358CONFIG_MIPS32_COMPAT=y
296CONFIG_COMPAT=y 359CONFIG_COMPAT=y
@@ -304,23 +367,20 @@ CONFIG_BINFMT_ELF32=y
304# 367#
305CONFIG_PM=y 368CONFIG_PM=y
306# CONFIG_PM_DEBUG is not set 369# CONFIG_PM_DEBUG is not set
307 370# CONFIG_PM_RUNTIME is not set
308#
309# Networking
310#
311CONFIG_NET=y 371CONFIG_NET=y
312 372
313# 373#
314# Networking options 374# Networking options
315# 375#
316CONFIG_PACKET=y 376CONFIG_PACKET=y
317CONFIG_PACKET_MMAP=y
318CONFIG_UNIX=y 377CONFIG_UNIX=y
319CONFIG_XFRM=y 378CONFIG_XFRM=y
320CONFIG_XFRM_USER=m 379CONFIG_XFRM_USER=m
321# CONFIG_XFRM_SUB_POLICY is not set 380# CONFIG_XFRM_SUB_POLICY is not set
322CONFIG_XFRM_MIGRATE=y 381CONFIG_XFRM_MIGRATE=y
323# CONFIG_XFRM_STATISTICS is not set 382# CONFIG_XFRM_STATISTICS is not set
383CONFIG_XFRM_IPCOMP=m
324CONFIG_NET_KEY=y 384CONFIG_NET_KEY=y
325CONFIG_NET_KEY_MIGRATE=y 385CONFIG_NET_KEY_MIGRATE=y
326CONFIG_INET=y 386CONFIG_INET=y
@@ -353,36 +413,6 @@ CONFIG_INET_TCP_DIAG=y
353CONFIG_TCP_CONG_CUBIC=y 413CONFIG_TCP_CONG_CUBIC=y
354CONFIG_DEFAULT_TCP_CONG="cubic" 414CONFIG_DEFAULT_TCP_CONG="cubic"
355CONFIG_TCP_MD5SIG=y 415CONFIG_TCP_MD5SIG=y
356CONFIG_IP_VS=m
357# CONFIG_IP_VS_DEBUG is not set
358CONFIG_IP_VS_TAB_BITS=12
359
360#
361# IPVS transport protocol load balancing support
362#
363CONFIG_IP_VS_PROTO_TCP=y
364CONFIG_IP_VS_PROTO_UDP=y
365CONFIG_IP_VS_PROTO_ESP=y
366CONFIG_IP_VS_PROTO_AH=y
367
368#
369# IPVS scheduler
370#
371CONFIG_IP_VS_RR=m
372CONFIG_IP_VS_WRR=m
373CONFIG_IP_VS_LC=m
374CONFIG_IP_VS_WLC=m
375CONFIG_IP_VS_LBLC=m
376CONFIG_IP_VS_LBLCR=m
377CONFIG_IP_VS_DH=m
378CONFIG_IP_VS_SH=m
379CONFIG_IP_VS_SED=m
380CONFIG_IP_VS_NQ=m
381
382#
383# IPVS application helper
384#
385CONFIG_IP_VS_FTP=m
386CONFIG_IPV6=m 416CONFIG_IPV6=m
387CONFIG_IPV6_PRIVACY=y 417CONFIG_IPV6_PRIVACY=y
388CONFIG_IPV6_ROUTER_PREF=y 418CONFIG_IPV6_ROUTER_PREF=y
@@ -399,11 +429,13 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
399CONFIG_INET6_XFRM_MODE_BEET=m 429CONFIG_INET6_XFRM_MODE_BEET=m
400CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m 430CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
401CONFIG_IPV6_SIT=m 431CONFIG_IPV6_SIT=m
432CONFIG_IPV6_SIT_6RD=y
402CONFIG_IPV6_NDISC_NODETYPE=y 433CONFIG_IPV6_NDISC_NODETYPE=y
403CONFIG_IPV6_TUNNEL=m 434CONFIG_IPV6_TUNNEL=m
404CONFIG_IPV6_MULTIPLE_TABLES=y 435CONFIG_IPV6_MULTIPLE_TABLES=y
405CONFIG_IPV6_SUBTREES=y 436CONFIG_IPV6_SUBTREES=y
406# CONFIG_IPV6_MROUTE is not set 437# CONFIG_IPV6_MROUTE is not set
438CONFIG_NETLABEL=y
407CONFIG_NETWORK_SECMARK=y 439CONFIG_NETWORK_SECMARK=y
408CONFIG_NETFILTER=y 440CONFIG_NETFILTER=y
409# CONFIG_NETFILTER_DEBUG is not set 441# CONFIG_NETFILTER_DEBUG is not set
@@ -421,19 +453,53 @@ CONFIG_NF_CONNTRACK_IRC=m
421CONFIG_NF_CONNTRACK_SIP=m 453CONFIG_NF_CONNTRACK_SIP=m
422CONFIG_NF_CT_NETLINK=m 454CONFIG_NF_CT_NETLINK=m
423CONFIG_NETFILTER_XTABLES=m 455CONFIG_NETFILTER_XTABLES=m
456CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
424CONFIG_NETFILTER_XT_TARGET_MARK=m 457CONFIG_NETFILTER_XT_TARGET_MARK=m
425CONFIG_NETFILTER_XT_TARGET_NFLOG=m 458CONFIG_NETFILTER_XT_TARGET_NFLOG=m
426CONFIG_NETFILTER_XT_TARGET_SECMARK=m 459CONFIG_NETFILTER_XT_TARGET_SECMARK=m
427CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
428CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 460CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
429CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 461CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
430CONFIG_NETFILTER_XT_MATCH_MARK=m 462CONFIG_NETFILTER_XT_MATCH_MARK=m
431CONFIG_NETFILTER_XT_MATCH_POLICY=m 463CONFIG_NETFILTER_XT_MATCH_POLICY=m
432CONFIG_NETFILTER_XT_MATCH_STATE=m 464CONFIG_NETFILTER_XT_MATCH_STATE=m
465CONFIG_IP_VS=m
466CONFIG_IP_VS_IPV6=y
467# CONFIG_IP_VS_DEBUG is not set
468CONFIG_IP_VS_TAB_BITS=12
469
470#
471# IPVS transport protocol load balancing support
472#
473CONFIG_IP_VS_PROTO_TCP=y
474CONFIG_IP_VS_PROTO_UDP=y
475CONFIG_IP_VS_PROTO_AH_ESP=y
476CONFIG_IP_VS_PROTO_ESP=y
477CONFIG_IP_VS_PROTO_AH=y
478CONFIG_IP_VS_PROTO_SCTP=y
479
480#
481# IPVS scheduler
482#
483CONFIG_IP_VS_RR=m
484CONFIG_IP_VS_WRR=m
485CONFIG_IP_VS_LC=m
486CONFIG_IP_VS_WLC=m
487CONFIG_IP_VS_LBLC=m
488CONFIG_IP_VS_LBLCR=m
489CONFIG_IP_VS_DH=m
490CONFIG_IP_VS_SH=m
491CONFIG_IP_VS_SED=m
492CONFIG_IP_VS_NQ=m
493
494#
495# IPVS application helper
496#
497CONFIG_IP_VS_FTP=m
433 498
434# 499#
435# IP: Netfilter Configuration 500# IP: Netfilter Configuration
436# 501#
502CONFIG_NF_DEFRAG_IPV4=m
437CONFIG_NF_CONNTRACK_IPV4=m 503CONFIG_NF_CONNTRACK_IPV4=m
438CONFIG_NF_CONNTRACK_PROC_COMPAT=y 504CONFIG_NF_CONNTRACK_PROC_COMPAT=y
439CONFIG_IP_NF_IPTABLES=m 505CONFIG_IP_NF_IPTABLES=m
@@ -459,22 +525,44 @@ CONFIG_IP_NF_MANGLE=m
459CONFIG_NF_CONNTRACK_IPV6=m 525CONFIG_NF_CONNTRACK_IPV6=m
460CONFIG_IP6_NF_IPTABLES=m 526CONFIG_IP6_NF_IPTABLES=m
461CONFIG_IP6_NF_MATCH_IPV6HEADER=m 527CONFIG_IP6_NF_MATCH_IPV6HEADER=m
462CONFIG_IP6_NF_FILTER=m
463CONFIG_IP6_NF_TARGET_LOG=m 528CONFIG_IP6_NF_TARGET_LOG=m
529CONFIG_IP6_NF_FILTER=m
464CONFIG_IP6_NF_TARGET_REJECT=m 530CONFIG_IP6_NF_TARGET_REJECT=m
465CONFIG_IP6_NF_MANGLE=m 531CONFIG_IP6_NF_MANGLE=m
466# CONFIG_IP_DCCP is not set 532CONFIG_IP_DCCP=m
533CONFIG_INET_DCCP_DIAG=m
534
535#
536# DCCP CCIDs Configuration (EXPERIMENTAL)
537#
538# CONFIG_IP_DCCP_CCID2_DEBUG is not set
539CONFIG_IP_DCCP_CCID3=y
540# CONFIG_IP_DCCP_CCID3_DEBUG is not set
541CONFIG_IP_DCCP_CCID3_RTO=100
542CONFIG_IP_DCCP_TFRC_LIB=y
543
544#
545# DCCP Kernel Hacking
546#
547# CONFIG_IP_DCCP_DEBUG is not set
467CONFIG_IP_SCTP=m 548CONFIG_IP_SCTP=m
468# CONFIG_SCTP_DBG_MSG is not set 549# CONFIG_SCTP_DBG_MSG is not set
469# CONFIG_SCTP_DBG_OBJCNT is not set 550# CONFIG_SCTP_DBG_OBJCNT is not set
470# CONFIG_SCTP_HMAC_NONE is not set 551# CONFIG_SCTP_HMAC_NONE is not set
471# CONFIG_SCTP_HMAC_SHA1 is not set 552CONFIG_SCTP_HMAC_SHA1=y
472CONFIG_SCTP_HMAC_MD5=y 553# CONFIG_SCTP_HMAC_MD5 is not set
554# CONFIG_RDS is not set
473# CONFIG_TIPC is not set 555# CONFIG_TIPC is not set
474# CONFIG_ATM is not set 556# CONFIG_ATM is not set
475# CONFIG_BRIDGE is not set 557CONFIG_STP=m
476# CONFIG_VLAN_8021Q is not set 558CONFIG_GARP=m
559CONFIG_BRIDGE=m
560CONFIG_BRIDGE_IGMP_SNOOPING=y
561# CONFIG_NET_DSA is not set
562CONFIG_VLAN_8021Q=m
563CONFIG_VLAN_8021Q_GVRP=y
477# CONFIG_DECNET is not set 564# CONFIG_DECNET is not set
565CONFIG_LLC=m
478# CONFIG_LLC2 is not set 566# CONFIG_LLC2 is not set
479# CONFIG_IPX is not set 567# CONFIG_IPX is not set
480# CONFIG_ATALK is not set 568# CONFIG_ATALK is not set
@@ -482,26 +570,47 @@ CONFIG_SCTP_HMAC_MD5=y
482# CONFIG_LAPB is not set 570# CONFIG_LAPB is not set
483# CONFIG_ECONET is not set 571# CONFIG_ECONET is not set
484# CONFIG_WAN_ROUTER is not set 572# CONFIG_WAN_ROUTER is not set
573# CONFIG_PHONET is not set
574# CONFIG_IEEE802154 is not set
485# CONFIG_NET_SCHED is not set 575# CONFIG_NET_SCHED is not set
576# CONFIG_DCB is not set
486 577
487# 578#
488# Network testing 579# Network testing
489# 580#
490# CONFIG_NET_PKTGEN is not set 581# CONFIG_NET_PKTGEN is not set
491# CONFIG_HAMRADIO is not set 582CONFIG_HAMRADIO=y
583
584#
585# Packet Radio protocols
586#
587CONFIG_AX25=m
588CONFIG_AX25_DAMA_SLAVE=y
589CONFIG_NETROM=m
590CONFIG_ROSE=m
591
592#
593# AX.25 network device drivers
594#
595CONFIG_MKISS=m
596CONFIG_6PACK=m
597CONFIG_BPQETHER=m
598CONFIG_BAYCOM_SER_FDX=m
599CONFIG_BAYCOM_SER_HDX=m
600CONFIG_YAM=m
492# CONFIG_CAN is not set 601# CONFIG_CAN is not set
493# CONFIG_IRDA is not set 602# CONFIG_IRDA is not set
494# CONFIG_BT is not set 603# CONFIG_BT is not set
495# CONFIG_AF_RXRPC is not set 604# CONFIG_AF_RXRPC is not set
496CONFIG_FIB_RULES=y 605CONFIG_FIB_RULES=y
606CONFIG_WIRELESS=y
607# CONFIG_CFG80211 is not set
608# CONFIG_LIB80211 is not set
497 609
498# 610#
499# Wireless 611# CFG80211 needs to be enabled for MAC80211
500# 612#
501# CONFIG_CFG80211 is not set 613# CONFIG_WIMAX is not set
502# CONFIG_WIRELESS_EXT is not set
503# CONFIG_MAC80211 is not set
504# CONFIG_IEEE80211 is not set
505# CONFIG_RFKILL is not set 614# CONFIG_RFKILL is not set
506# CONFIG_NET_9P is not set 615# CONFIG_NET_9P is not set
507 616
@@ -513,9 +622,12 @@ CONFIG_FIB_RULES=y
513# Generic Driver Options 622# Generic Driver Options
514# 623#
515CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 624CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
625# CONFIG_DEVTMPFS is not set
516CONFIG_STANDALONE=y 626CONFIG_STANDALONE=y
517CONFIG_PREVENT_FIRMWARE_BUILD=y 627CONFIG_PREVENT_FIRMWARE_BUILD=y
518CONFIG_FW_LOADER=m 628CONFIG_FW_LOADER=m
629CONFIG_FIRMWARE_IN_KERNEL=y
630CONFIG_EXTRA_FIRMWARE=""
519# CONFIG_DEBUG_DRIVER is not set 631# CONFIG_DEBUG_DRIVER is not set
520# CONFIG_DEBUG_DEVRES is not set 632# CONFIG_DEBUG_DEVRES is not set
521# CONFIG_SYS_HYPERVISOR is not set 633# CONFIG_SYS_HYPERVISOR is not set
@@ -530,33 +642,53 @@ CONFIG_BLK_DEV=y
530# CONFIG_BLK_DEV_COW_COMMON is not set 642# CONFIG_BLK_DEV_COW_COMMON is not set
531CONFIG_BLK_DEV_LOOP=m 643CONFIG_BLK_DEV_LOOP=m
532CONFIG_BLK_DEV_CRYPTOLOOP=m 644CONFIG_BLK_DEV_CRYPTOLOOP=m
645
646#
647# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
648#
533CONFIG_BLK_DEV_NBD=m 649CONFIG_BLK_DEV_NBD=m
534# CONFIG_BLK_DEV_SX8 is not set 650# CONFIG_BLK_DEV_SX8 is not set
535# CONFIG_BLK_DEV_RAM is not set 651# CONFIG_BLK_DEV_RAM is not set
536# CONFIG_CDROM_PKTCDVD is not set 652# CONFIG_CDROM_PKTCDVD is not set
537# CONFIG_ATA_OVER_ETH is not set 653# CONFIG_ATA_OVER_ETH is not set
654# CONFIG_BLK_DEV_HD is not set
538CONFIG_MISC_DEVICES=y 655CONFIG_MISC_DEVICES=y
656# CONFIG_AD525X_DPOT is not set
539# CONFIG_PHANTOM is not set 657# CONFIG_PHANTOM is not set
540# CONFIG_EEPROM_93CX6 is not set
541CONFIG_SGI_IOC4=m 658CONFIG_SGI_IOC4=m
542# CONFIG_TIFM_CORE is not set 659# CONFIG_TIFM_CORE is not set
660# CONFIG_ICS932S401 is not set
543# CONFIG_ENCLOSURE_SERVICES is not set 661# CONFIG_ENCLOSURE_SERVICES is not set
662# CONFIG_HP_ILO is not set
663# CONFIG_ISL29003 is not set
664# CONFIG_SENSORS_TSL2550 is not set
665# CONFIG_DS1682 is not set
666# CONFIG_C2PORT is not set
667
668#
669# EEPROM support
670#
671# CONFIG_EEPROM_AT24 is not set
672CONFIG_EEPROM_LEGACY=y
673CONFIG_EEPROM_MAX6875=y
674# CONFIG_EEPROM_93CX6 is not set
675# CONFIG_CB710_CORE is not set
544CONFIG_HAVE_IDE=y 676CONFIG_HAVE_IDE=y
545CONFIG_IDE=y 677CONFIG_IDE=y
546CONFIG_IDE_MAX_HWIFS=4
547CONFIG_BLK_DEV_IDE=y
548 678
549# 679#
550# Please see Documentation/ide/ide.txt for help/info on IDE drives 680# Please see Documentation/ide/ide.txt for help/info on IDE drives
551# 681#
682CONFIG_IDE_XFER_MODE=y
683CONFIG_IDE_TIMINGS=y
684CONFIG_IDE_ATAPI=y
552# CONFIG_BLK_DEV_IDE_SATA is not set 685# CONFIG_BLK_DEV_IDE_SATA is not set
553CONFIG_BLK_DEV_IDEDISK=y 686CONFIG_IDE_GD=y
554# CONFIG_IDEDISK_MULTI_MODE is not set 687CONFIG_IDE_GD_ATA=y
688# CONFIG_IDE_GD_ATAPI is not set
555CONFIG_BLK_DEV_IDECD=y 689CONFIG_BLK_DEV_IDECD=y
556CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y 690CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
557CONFIG_BLK_DEV_IDETAPE=y 691CONFIG_BLK_DEV_IDETAPE=y
558CONFIG_BLK_DEV_IDEFLOPPY=y
559# CONFIG_BLK_DEV_IDESCSI is not set
560# CONFIG_IDE_TASK_IOCTL is not set 692# CONFIG_IDE_TASK_IOCTL is not set
561CONFIG_IDE_PROC_FS=y 693CONFIG_IDE_PROC_FS=y
562 694
@@ -581,14 +713,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y
581# CONFIG_BLK_DEV_AMD74XX is not set 713# CONFIG_BLK_DEV_AMD74XX is not set
582CONFIG_BLK_DEV_CMD64X=y 714CONFIG_BLK_DEV_CMD64X=y
583# CONFIG_BLK_DEV_TRIFLEX is not set 715# CONFIG_BLK_DEV_TRIFLEX is not set
584# CONFIG_BLK_DEV_CY82C693 is not set
585# CONFIG_BLK_DEV_CS5520 is not set 716# CONFIG_BLK_DEV_CS5520 is not set
586# CONFIG_BLK_DEV_CS5530 is not set 717# CONFIG_BLK_DEV_CS5530 is not set
587# CONFIG_BLK_DEV_HPT34X is not set
588# CONFIG_BLK_DEV_HPT366 is not set 718# CONFIG_BLK_DEV_HPT366 is not set
589# CONFIG_BLK_DEV_JMICRON is not set 719# CONFIG_BLK_DEV_JMICRON is not set
590# CONFIG_BLK_DEV_SC1200 is not set 720# CONFIG_BLK_DEV_SC1200 is not set
591# CONFIG_BLK_DEV_PIIX is not set 721# CONFIG_BLK_DEV_PIIX is not set
722# CONFIG_BLK_DEV_IT8172 is not set
592CONFIG_BLK_DEV_IT8213=m 723CONFIG_BLK_DEV_IT8213=m
593# CONFIG_BLK_DEV_IT821X is not set 724# CONFIG_BLK_DEV_IT821X is not set
594# CONFIG_BLK_DEV_NS87415 is not set 725# CONFIG_BLK_DEV_NS87415 is not set
@@ -600,14 +731,12 @@ CONFIG_BLK_DEV_IT8213=m
600# CONFIG_BLK_DEV_TRM290 is not set 731# CONFIG_BLK_DEV_TRM290 is not set
601# CONFIG_BLK_DEV_VIA82CXXX is not set 732# CONFIG_BLK_DEV_VIA82CXXX is not set
602CONFIG_BLK_DEV_TC86C001=m 733CONFIG_BLK_DEV_TC86C001=m
603# CONFIG_BLK_DEV_IDE_SWARM is not set
604CONFIG_BLK_DEV_IDEDMA=y 734CONFIG_BLK_DEV_IDEDMA=y
605# CONFIG_BLK_DEV_HD_ONLY is not set
606# CONFIG_BLK_DEV_HD is not set
607 735
608# 736#
609# SCSI device support 737# SCSI device support
610# 738#
739CONFIG_SCSI_MOD=y
611# CONFIG_RAID_ATTRS is not set 740# CONFIG_RAID_ATTRS is not set
612CONFIG_SCSI=y 741CONFIG_SCSI=y
613CONFIG_SCSI_DMA=y 742CONFIG_SCSI_DMA=y
@@ -625,10 +754,6 @@ CONFIG_BLK_DEV_SR=m
625CONFIG_BLK_DEV_SR_VENDOR=y 754CONFIG_BLK_DEV_SR_VENDOR=y
626CONFIG_CHR_DEV_SG=m 755CONFIG_CHR_DEV_SG=m
627CONFIG_CHR_DEV_SCH=m 756CONFIG_CHR_DEV_SCH=m
628
629#
630# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
631#
632# CONFIG_SCSI_MULTI_LUN is not set 757# CONFIG_SCSI_MULTI_LUN is not set
633# CONFIG_SCSI_CONSTANTS is not set 758# CONFIG_SCSI_CONSTANTS is not set
634# CONFIG_SCSI_LOGGING is not set 759# CONFIG_SCSI_LOGGING is not set
@@ -645,27 +770,36 @@ CONFIG_SCSI_WAIT_SCAN=m
645# CONFIG_SCSI_SRP_ATTRS is not set 770# CONFIG_SCSI_SRP_ATTRS is not set
646CONFIG_SCSI_LOWLEVEL=y 771CONFIG_SCSI_LOWLEVEL=y
647# CONFIG_ISCSI_TCP is not set 772# CONFIG_ISCSI_TCP is not set
773# CONFIG_SCSI_CXGB3_ISCSI is not set
774# CONFIG_SCSI_BNX2_ISCSI is not set
775# CONFIG_BE2ISCSI is not set
648# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 776# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
777# CONFIG_SCSI_HPSA is not set
649# CONFIG_SCSI_3W_9XXX is not set 778# CONFIG_SCSI_3W_9XXX is not set
779# CONFIG_SCSI_3W_SAS is not set
650# CONFIG_SCSI_ACARD is not set 780# CONFIG_SCSI_ACARD is not set
651# CONFIG_SCSI_AACRAID is not set 781# CONFIG_SCSI_AACRAID is not set
652# CONFIG_SCSI_AIC7XXX is not set 782# CONFIG_SCSI_AIC7XXX is not set
653# CONFIG_SCSI_AIC7XXX_OLD is not set 783# CONFIG_SCSI_AIC7XXX_OLD is not set
654# CONFIG_SCSI_AIC79XX is not set 784# CONFIG_SCSI_AIC79XX is not set
655# CONFIG_SCSI_AIC94XX is not set 785# CONFIG_SCSI_AIC94XX is not set
786# CONFIG_SCSI_MVSAS is not set
656# CONFIG_SCSI_DPT_I2O is not set 787# CONFIG_SCSI_DPT_I2O is not set
657# CONFIG_SCSI_ADVANSYS is not set 788# CONFIG_SCSI_ADVANSYS is not set
658# CONFIG_SCSI_ARCMSR is not set 789# CONFIG_SCSI_ARCMSR is not set
659# CONFIG_MEGARAID_NEWGEN is not set 790# CONFIG_MEGARAID_NEWGEN is not set
660# CONFIG_MEGARAID_LEGACY is not set 791# CONFIG_MEGARAID_LEGACY is not set
661# CONFIG_MEGARAID_SAS is not set 792# CONFIG_MEGARAID_SAS is not set
793# CONFIG_SCSI_MPT2SAS is not set
662# CONFIG_SCSI_HPTIOP is not set 794# CONFIG_SCSI_HPTIOP is not set
795# CONFIG_LIBFC is not set
796# CONFIG_LIBFCOE is not set
797# CONFIG_FCOE is not set
663# CONFIG_SCSI_DMX3191D is not set 798# CONFIG_SCSI_DMX3191D is not set
664# CONFIG_SCSI_FUTURE_DOMAIN is not set 799# CONFIG_SCSI_FUTURE_DOMAIN is not set
665# CONFIG_SCSI_IPS is not set 800# CONFIG_SCSI_IPS is not set
666# CONFIG_SCSI_INITIO is not set 801# CONFIG_SCSI_INITIO is not set
667# CONFIG_SCSI_INIA100 is not set 802# CONFIG_SCSI_INIA100 is not set
668# CONFIG_SCSI_MVSAS is not set
669# CONFIG_SCSI_STEX is not set 803# CONFIG_SCSI_STEX is not set
670# CONFIG_SCSI_SYM53C8XX_2 is not set 804# CONFIG_SCSI_SYM53C8XX_2 is not set
671# CONFIG_SCSI_IPR is not set 805# CONFIG_SCSI_IPR is not set
@@ -676,9 +810,15 @@ CONFIG_SCSI_LOWLEVEL=y
676# CONFIG_SCSI_DC395x is not set 810# CONFIG_SCSI_DC395x is not set
677# CONFIG_SCSI_DC390T is not set 811# CONFIG_SCSI_DC390T is not set
678# CONFIG_SCSI_DEBUG is not set 812# CONFIG_SCSI_DEBUG is not set
813# CONFIG_SCSI_PMCRAID is not set
814# CONFIG_SCSI_PM8001 is not set
679# CONFIG_SCSI_SRP is not set 815# CONFIG_SCSI_SRP is not set
816# CONFIG_SCSI_BFA_FC is not set
817# CONFIG_SCSI_DH is not set
818# CONFIG_SCSI_OSD_INITIATOR is not set
680CONFIG_ATA=y 819CONFIG_ATA=y
681# CONFIG_ATA_NONSTANDARD is not set 820# CONFIG_ATA_NONSTANDARD is not set
821CONFIG_ATA_VERBOSE_ERROR=y
682CONFIG_SATA_PMP=y 822CONFIG_SATA_PMP=y
683# CONFIG_SATA_AHCI is not set 823# CONFIG_SATA_AHCI is not set
684CONFIG_SATA_SIL24=y 824CONFIG_SATA_SIL24=y
@@ -700,6 +840,7 @@ CONFIG_ATA_SFF=y
700# CONFIG_PATA_ALI is not set 840# CONFIG_PATA_ALI is not set
701# CONFIG_PATA_AMD is not set 841# CONFIG_PATA_AMD is not set
702# CONFIG_PATA_ARTOP is not set 842# CONFIG_PATA_ARTOP is not set
843# CONFIG_PATA_ATP867X is not set
703# CONFIG_PATA_ATIIXP is not set 844# CONFIG_PATA_ATIIXP is not set
704# CONFIG_PATA_CMD640_PCI is not set 845# CONFIG_PATA_CMD640_PCI is not set
705# CONFIG_PATA_CMD64X is not set 846# CONFIG_PATA_CMD64X is not set
@@ -715,6 +856,7 @@ CONFIG_ATA_SFF=y
715# CONFIG_PATA_IT821X is not set 856# CONFIG_PATA_IT821X is not set
716# CONFIG_PATA_IT8213 is not set 857# CONFIG_PATA_IT8213 is not set
717# CONFIG_PATA_JMICRON is not set 858# CONFIG_PATA_JMICRON is not set
859# CONFIG_PATA_LEGACY is not set
718# CONFIG_PATA_TRIFLEX is not set 860# CONFIG_PATA_TRIFLEX is not set
719# CONFIG_PATA_MARVELL is not set 861# CONFIG_PATA_MARVELL is not set
720# CONFIG_PATA_MPIIX is not set 862# CONFIG_PATA_MPIIX is not set
@@ -725,14 +867,16 @@ CONFIG_ATA_SFF=y
725# CONFIG_PATA_NS87415 is not set 867# CONFIG_PATA_NS87415 is not set
726# CONFIG_PATA_OPTI is not set 868# CONFIG_PATA_OPTI is not set
727# CONFIG_PATA_OPTIDMA is not set 869# CONFIG_PATA_OPTIDMA is not set
870# CONFIG_PATA_PDC2027X is not set
728# CONFIG_PATA_PDC_OLD is not set 871# CONFIG_PATA_PDC_OLD is not set
729# CONFIG_PATA_RADISYS is not set 872# CONFIG_PATA_RADISYS is not set
873# CONFIG_PATA_RDC is not set
730# CONFIG_PATA_RZ1000 is not set 874# CONFIG_PATA_RZ1000 is not set
731# CONFIG_PATA_SC1200 is not set 875# CONFIG_PATA_SC1200 is not set
732# CONFIG_PATA_SERVERWORKS is not set 876# CONFIG_PATA_SERVERWORKS is not set
733# CONFIG_PATA_PDC2027X is not set
734CONFIG_PATA_SIL680=y 877CONFIG_PATA_SIL680=y
735# CONFIG_PATA_SIS is not set 878# CONFIG_PATA_SIS is not set
879# CONFIG_PATA_TOSHIBA is not set
736# CONFIG_PATA_VIA is not set 880# CONFIG_PATA_VIA is not set
737# CONFIG_PATA_WINBOND is not set 881# CONFIG_PATA_WINBOND is not set
738# CONFIG_PATA_PLATFORM is not set 882# CONFIG_PATA_PLATFORM is not set
@@ -745,13 +889,16 @@ CONFIG_PATA_SIL680=y
745# 889#
746 890
747# 891#
748# Enable only one of the two stacks, unless you know what you are doing 892# You can enable one or both FireWire driver stacks.
893#
894
895#
896# The newer stack is recommended.
749# 897#
750# CONFIG_FIREWIRE is not set 898# CONFIG_FIREWIRE is not set
751# CONFIG_IEEE1394 is not set 899# CONFIG_IEEE1394 is not set
752# CONFIG_I2O is not set 900# CONFIG_I2O is not set
753CONFIG_NETDEVICES=y 901CONFIG_NETDEVICES=y
754# CONFIG_NETDEVICES_MULTIQUEUE is not set
755# CONFIG_DUMMY is not set 902# CONFIG_DUMMY is not set
756# CONFIG_BONDING is not set 903# CONFIG_BONDING is not set
757# CONFIG_MACVLAN is not set 904# CONFIG_MACVLAN is not set
@@ -774,6 +921,9 @@ CONFIG_PHYLIB=y
774# CONFIG_BROADCOM_PHY is not set 921# CONFIG_BROADCOM_PHY is not set
775# CONFIG_ICPLUS_PHY is not set 922# CONFIG_ICPLUS_PHY is not set
776# CONFIG_REALTEK_PHY is not set 923# CONFIG_REALTEK_PHY is not set
924# CONFIG_NATIONAL_PHY is not set
925# CONFIG_STE10XP is not set
926# CONFIG_LSI_ET1011C_PHY is not set
777# CONFIG_FIXED_PHY is not set 927# CONFIG_FIXED_PHY is not set
778# CONFIG_MDIO_BITBANG is not set 928# CONFIG_MDIO_BITBANG is not set
779CONFIG_NET_ETHERNET=y 929CONFIG_NET_ETHERNET=y
@@ -783,23 +933,33 @@ CONFIG_MII=y
783# CONFIG_SUNGEM is not set 933# CONFIG_SUNGEM is not set
784# CONFIG_CASSINI is not set 934# CONFIG_CASSINI is not set
785# CONFIG_NET_VENDOR_3COM is not set 935# CONFIG_NET_VENDOR_3COM is not set
936# CONFIG_SMC91X is not set
786# CONFIG_DM9000 is not set 937# CONFIG_DM9000 is not set
938# CONFIG_ETHOC is not set
939# CONFIG_SMSC911X is not set
940# CONFIG_DNET is not set
787# CONFIG_NET_TULIP is not set 941# CONFIG_NET_TULIP is not set
788# CONFIG_HP100 is not set 942# CONFIG_HP100 is not set
789# CONFIG_IBM_NEW_EMAC_ZMII is not set 943# CONFIG_IBM_NEW_EMAC_ZMII is not set
790# CONFIG_IBM_NEW_EMAC_RGMII is not set 944# CONFIG_IBM_NEW_EMAC_RGMII is not set
791# CONFIG_IBM_NEW_EMAC_TAH is not set 945# CONFIG_IBM_NEW_EMAC_TAH is not set
792# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 946# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
947# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
948# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
949# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
793# CONFIG_NET_PCI is not set 950# CONFIG_NET_PCI is not set
794# CONFIG_B44 is not set 951# CONFIG_B44 is not set
952# CONFIG_KS8842 is not set
953# CONFIG_KS8851_MLL is not set
954# CONFIG_ATL2 is not set
795CONFIG_NETDEV_1000=y 955CONFIG_NETDEV_1000=y
796# CONFIG_ACENIC is not set 956# CONFIG_ACENIC is not set
797# CONFIG_DL2K is not set 957# CONFIG_DL2K is not set
798# CONFIG_E1000 is not set 958# CONFIG_E1000 is not set
799# CONFIG_E1000E is not set 959# CONFIG_E1000E is not set
800# CONFIG_E1000E_ENABLED is not set
801# CONFIG_IP1000 is not set 960# CONFIG_IP1000 is not set
802# CONFIG_IGB is not set 961# CONFIG_IGB is not set
962# CONFIG_IGBVF is not set
803# CONFIG_NS83820 is not set 963# CONFIG_NS83820 is not set
804# CONFIG_HAMACHI is not set 964# CONFIG_HAMACHI is not set
805# CONFIG_YELLOWFIN is not set 965# CONFIG_YELLOWFIN is not set
@@ -811,29 +971,42 @@ CONFIG_SB1250_MAC=y
811# CONFIG_VIA_VELOCITY is not set 971# CONFIG_VIA_VELOCITY is not set
812# CONFIG_TIGON3 is not set 972# CONFIG_TIGON3 is not set
813# CONFIG_BNX2 is not set 973# CONFIG_BNX2 is not set
974# CONFIG_CNIC is not set
814# CONFIG_QLA3XXX is not set 975# CONFIG_QLA3XXX is not set
815# CONFIG_ATL1 is not set 976# CONFIG_ATL1 is not set
977# CONFIG_ATL1E is not set
978# CONFIG_ATL1C is not set
979# CONFIG_JME is not set
816CONFIG_NETDEV_10000=y 980CONFIG_NETDEV_10000=y
981CONFIG_MDIO=m
817# CONFIG_CHELSIO_T1 is not set 982# CONFIG_CHELSIO_T1 is not set
983CONFIG_CHELSIO_T3_DEPENDS=y
818CONFIG_CHELSIO_T3=m 984CONFIG_CHELSIO_T3=m
985# CONFIG_ENIC is not set
819# CONFIG_IXGBE is not set 986# CONFIG_IXGBE is not set
820# CONFIG_IXGB is not set 987# CONFIG_IXGB is not set
821# CONFIG_S2IO is not set 988# CONFIG_S2IO is not set
989# CONFIG_VXGE is not set
822# CONFIG_MYRI10GE is not set 990# CONFIG_MYRI10GE is not set
823CONFIG_NETXEN_NIC=m 991CONFIG_NETXEN_NIC=m
824# CONFIG_NIU is not set 992# CONFIG_NIU is not set
993# CONFIG_MLX4_EN is not set
825# CONFIG_MLX4_CORE is not set 994# CONFIG_MLX4_CORE is not set
826# CONFIG_TEHUTI is not set 995# CONFIG_TEHUTI is not set
827# CONFIG_BNX2X is not set 996# CONFIG_BNX2X is not set
997# CONFIG_QLCNIC is not set
998# CONFIG_QLGE is not set
828# CONFIG_SFC is not set 999# CONFIG_SFC is not set
1000# CONFIG_BE2NET is not set
829# CONFIG_TR is not set 1001# CONFIG_TR is not set
1002CONFIG_WLAN=y
1003# CONFIG_ATMEL is not set
1004# CONFIG_PRISM54 is not set
1005# CONFIG_HOSTAP is not set
830 1006
831# 1007#
832# Wireless LAN 1008# Enable WiMAX (Networking options) to see the WiMAX drivers
833# 1009#
834# CONFIG_WLAN_PRE80211 is not set
835# CONFIG_WLAN_80211 is not set
836# CONFIG_IWLWIFI_LEDS is not set
837# CONFIG_WAN is not set 1010# CONFIG_WAN is not set
838# CONFIG_FDDI is not set 1011# CONFIG_FDDI is not set
839# CONFIG_HIPPI is not set 1012# CONFIG_HIPPI is not set
@@ -856,6 +1029,7 @@ CONFIG_SLIP_MODE_SLIP6=y
856# CONFIG_NETCONSOLE is not set 1029# CONFIG_NETCONSOLE is not set
857# CONFIG_NETPOLL is not set 1030# CONFIG_NETPOLL is not set
858# CONFIG_NET_POLL_CONTROLLER is not set 1031# CONFIG_NET_POLL_CONTROLLER is not set
1032# CONFIG_VMXNET3 is not set
859# CONFIG_ISDN is not set 1033# CONFIG_ISDN is not set
860# CONFIG_PHONE is not set 1034# CONFIG_PHONE is not set
861 1035
@@ -873,6 +1047,7 @@ CONFIG_SERIO_SERPORT=y
873# CONFIG_SERIO_PCIPS2 is not set 1047# CONFIG_SERIO_PCIPS2 is not set
874# CONFIG_SERIO_LIBPS2 is not set 1048# CONFIG_SERIO_LIBPS2 is not set
875CONFIG_SERIO_RAW=m 1049CONFIG_SERIO_RAW=m
1050# CONFIG_SERIO_ALTERA_PS2 is not set
876# CONFIG_GAMEPORT is not set 1051# CONFIG_GAMEPORT is not set
877 1052
878# 1053#
@@ -893,8 +1068,6 @@ CONFIG_SERIAL_NONSTANDARD=y
893# CONFIG_N_HDLC is not set 1068# CONFIG_N_HDLC is not set
894# CONFIG_RISCOM8 is not set 1069# CONFIG_RISCOM8 is not set
895# CONFIG_SPECIALIX is not set 1070# CONFIG_SPECIALIX is not set
896# CONFIG_SX is not set
897# CONFIG_RIO is not set
898# CONFIG_STALDRV is not set 1071# CONFIG_STALDRV is not set
899# CONFIG_NOZOMI is not set 1072# CONFIG_NOZOMI is not set
900 1073
@@ -911,7 +1084,9 @@ CONFIG_SERIAL_SB1250_DUART_CONSOLE=y
911CONFIG_SERIAL_CORE=y 1084CONFIG_SERIAL_CORE=y
912CONFIG_SERIAL_CORE_CONSOLE=y 1085CONFIG_SERIAL_CORE_CONSOLE=y
913# CONFIG_SERIAL_JSM is not set 1086# CONFIG_SERIAL_JSM is not set
1087# CONFIG_SERIAL_TIMBERDALE is not set
914CONFIG_UNIX98_PTYS=y 1088CONFIG_UNIX98_PTYS=y
1089# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
915CONFIG_LEGACY_PTYS=y 1090CONFIG_LEGACY_PTYS=y
916CONFIG_LEGACY_PTY_COUNT=256 1091CONFIG_LEGACY_PTY_COUNT=256
917# CONFIG_IPMI_HANDLER is not set 1092# CONFIG_IPMI_HANDLER is not set
@@ -923,89 +1098,99 @@ CONFIG_LEGACY_PTY_COUNT=256
923CONFIG_DEVPORT=y 1098CONFIG_DEVPORT=y
924CONFIG_I2C=y 1099CONFIG_I2C=y
925CONFIG_I2C_BOARDINFO=y 1100CONFIG_I2C_BOARDINFO=y
1101CONFIG_I2C_COMPAT=y
926CONFIG_I2C_CHARDEV=y 1102CONFIG_I2C_CHARDEV=y
1103CONFIG_I2C_HELPER_AUTO=y
927 1104
928# 1105#
929# I2C Hardware Bus support 1106# I2C Hardware Bus support
930# 1107#
1108
1109#
1110# PC SMBus host controller drivers
1111#
931# CONFIG_I2C_ALI1535 is not set 1112# CONFIG_I2C_ALI1535 is not set
932# CONFIG_I2C_ALI1563 is not set 1113# CONFIG_I2C_ALI1563 is not set
933# CONFIG_I2C_ALI15X3 is not set 1114# CONFIG_I2C_ALI15X3 is not set
934# CONFIG_I2C_AMD756 is not set 1115# CONFIG_I2C_AMD756 is not set
935# CONFIG_I2C_AMD8111 is not set 1116# CONFIG_I2C_AMD8111 is not set
936# CONFIG_I2C_I801 is not set 1117# CONFIG_I2C_I801 is not set
937# CONFIG_I2C_I810 is not set 1118# CONFIG_I2C_ISCH is not set
938# CONFIG_I2C_PIIX4 is not set 1119# CONFIG_I2C_PIIX4 is not set
939# CONFIG_I2C_NFORCE2 is not set 1120# CONFIG_I2C_NFORCE2 is not set
940# CONFIG_I2C_OCORES is not set
941# CONFIG_I2C_PARPORT_LIGHT is not set
942# CONFIG_I2C_PROSAVAGE is not set
943# CONFIG_I2C_SAVAGE4 is not set
944CONFIG_I2C_SIBYTE=y
945# CONFIG_I2C_SIMTEC is not set
946# CONFIG_I2C_SIS5595 is not set 1121# CONFIG_I2C_SIS5595 is not set
947# CONFIG_I2C_SIS630 is not set 1122# CONFIG_I2C_SIS630 is not set
948# CONFIG_I2C_SIS96X is not set 1123# CONFIG_I2C_SIS96X is not set
949# CONFIG_I2C_TAOS_EVM is not set
950# CONFIG_I2C_STUB is not set
951# CONFIG_I2C_VIA is not set 1124# CONFIG_I2C_VIA is not set
952# CONFIG_I2C_VIAPRO is not set 1125# CONFIG_I2C_VIAPRO is not set
953# CONFIG_I2C_VOODOO3 is not set
954# CONFIG_I2C_PCA_PLATFORM is not set
955 1126
956# 1127#
957# Miscellaneous I2C Chip support 1128# I2C system bus drivers (mostly embedded / system-on-chip)
958# 1129#
959# CONFIG_DS1682 is not set 1130# CONFIG_I2C_OCORES is not set
960CONFIG_EEPROM_LEGACY=y 1131# CONFIG_I2C_SIMTEC is not set
961CONFIG_SENSORS_PCF8574=y 1132# CONFIG_I2C_XILINX is not set
962# CONFIG_PCF8575 is not set 1133
963CONFIG_SENSORS_PCF8591=y 1134#
964CONFIG_EEPROM_MAX6875=y 1135# External I2C/SMBus adapter drivers
965# CONFIG_SENSORS_TSL2550 is not set 1136#
1137# CONFIG_I2C_PARPORT_LIGHT is not set
1138# CONFIG_I2C_TAOS_EVM is not set
1139
1140#
1141# Other I2C/SMBus bus drivers
1142#
1143# CONFIG_I2C_PCA_PLATFORM is not set
1144CONFIG_I2C_SIBYTE=y
1145# CONFIG_I2C_STUB is not set
966CONFIG_I2C_DEBUG_CORE=y 1146CONFIG_I2C_DEBUG_CORE=y
967CONFIG_I2C_DEBUG_ALGO=y 1147CONFIG_I2C_DEBUG_ALGO=y
968CONFIG_I2C_DEBUG_BUS=y 1148CONFIG_I2C_DEBUG_BUS=y
969CONFIG_I2C_DEBUG_CHIP=y
970# CONFIG_SPI is not set 1149# CONFIG_SPI is not set
1150
1151#
1152# PPS support
1153#
1154# CONFIG_PPS is not set
971# CONFIG_W1 is not set 1155# CONFIG_W1 is not set
972# CONFIG_POWER_SUPPLY is not set 1156# CONFIG_POWER_SUPPLY is not set
973# CONFIG_HWMON is not set 1157# CONFIG_HWMON is not set
974# CONFIG_THERMAL is not set 1158# CONFIG_THERMAL is not set
975# CONFIG_THERMAL_HWMON is not set
976# CONFIG_WATCHDOG is not set 1159# CONFIG_WATCHDOG is not set
1160CONFIG_SSB_POSSIBLE=y
977 1161
978# 1162#
979# Sonics Silicon Backplane 1163# Sonics Silicon Backplane
980# 1164#
981CONFIG_SSB_POSSIBLE=y
982# CONFIG_SSB is not set 1165# CONFIG_SSB is not set
983 1166
984# 1167#
985# Multifunction device drivers 1168# Multifunction device drivers
986# 1169#
1170# CONFIG_MFD_CORE is not set
1171# CONFIG_MFD_88PM860X is not set
987# CONFIG_MFD_SM501 is not set 1172# CONFIG_MFD_SM501 is not set
988# CONFIG_HTC_PASIC3 is not set 1173# CONFIG_HTC_PASIC3 is not set
989 1174# CONFIG_TWL4030_CORE is not set
990# 1175# CONFIG_MFD_TMIO is not set
991# Multimedia devices 1176# CONFIG_PMIC_DA903X is not set
992# 1177# CONFIG_PMIC_ADP5520 is not set
993 1178# CONFIG_MFD_MAX8925 is not set
994# 1179# CONFIG_MFD_WM8400 is not set
995# Multimedia core support 1180# CONFIG_MFD_WM831X is not set
996# 1181# CONFIG_MFD_WM8350_I2C is not set
997# CONFIG_VIDEO_DEV is not set 1182# CONFIG_MFD_WM8994 is not set
998# CONFIG_DVB_CORE is not set 1183# CONFIG_MFD_PCF50633 is not set
999# CONFIG_VIDEO_MEDIA is not set 1184# CONFIG_AB3100_CORE is not set
1000 1185# CONFIG_LPC_SCH is not set
1001# 1186# CONFIG_REGULATOR is not set
1002# Multimedia drivers 1187# CONFIG_MEDIA_SUPPORT is not set
1003#
1004# CONFIG_DAB is not set
1005 1188
1006# 1189#
1007# Graphics support 1190# Graphics support
1008# 1191#
1192CONFIG_VGA_ARB=y
1193CONFIG_VGA_ARB_MAX_GPUS=16
1009# CONFIG_DRM is not set 1194# CONFIG_DRM is not set
1010# CONFIG_VGASTATE is not set 1195# CONFIG_VGASTATE is not set
1011# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1196# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1016,10 +1201,6 @@ CONFIG_SSB_POSSIBLE=y
1016# Display device support 1201# Display device support
1017# 1202#
1018# CONFIG_DISPLAY_SUPPORT is not set 1203# CONFIG_DISPLAY_SUPPORT is not set
1019
1020#
1021# Sound
1022#
1023# CONFIG_SOUND is not set 1204# CONFIG_SOUND is not set
1024CONFIG_USB_SUPPORT=y 1205CONFIG_USB_SUPPORT=y
1025CONFIG_USB_ARCH_HAS_HCD=y 1206CONFIG_USB_ARCH_HAS_HCD=y
@@ -1030,9 +1211,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y
1030# CONFIG_USB_OTG_BLACKLIST_HUB is not set 1211# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1031 1212
1032# 1213#
1033# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1214# Enable Host or Gadget support to see Inventra options
1215#
1216
1217#
1218# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
1034# 1219#
1035# CONFIG_USB_GADGET is not set 1220# CONFIG_USB_GADGET is not set
1221
1222#
1223# OTG and related infrastructure
1224#
1225# CONFIG_UWB is not set
1036# CONFIG_MMC is not set 1226# CONFIG_MMC is not set
1037# CONFIG_MEMSTICK is not set 1227# CONFIG_MEMSTICK is not set
1038# CONFIG_NEW_LEDS is not set 1228# CONFIG_NEW_LEDS is not set
@@ -1040,41 +1230,66 @@ CONFIG_USB_ARCH_HAS_EHCI=y
1040# CONFIG_INFINIBAND is not set 1230# CONFIG_INFINIBAND is not set
1041CONFIG_RTC_LIB=y 1231CONFIG_RTC_LIB=y
1042# CONFIG_RTC_CLASS is not set 1232# CONFIG_RTC_CLASS is not set
1233# CONFIG_DMADEVICES is not set
1234# CONFIG_AUXDISPLAY is not set
1043# CONFIG_UIO is not set 1235# CONFIG_UIO is not set
1044 1236
1045# 1237#
1238# TI VLYNQ
1239#
1240# CONFIG_STAGING is not set
1241
1242#
1046# File systems 1243# File systems
1047# 1244#
1048CONFIG_EXT2_FS=m 1245CONFIG_EXT2_FS=m
1049CONFIG_EXT2_FS_XATTR=y 1246CONFIG_EXT2_FS_XATTR=y
1050# CONFIG_EXT2_FS_POSIX_ACL is not set 1247CONFIG_EXT2_FS_POSIX_ACL=y
1051# CONFIG_EXT2_FS_SECURITY is not set 1248CONFIG_EXT2_FS_SECURITY=y
1052# CONFIG_EXT2_FS_XIP is not set 1249CONFIG_EXT2_FS_XIP=y
1053CONFIG_EXT3_FS=y 1250CONFIG_EXT3_FS=m
1251CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
1054CONFIG_EXT3_FS_XATTR=y 1252CONFIG_EXT3_FS_XATTR=y
1055# CONFIG_EXT3_FS_POSIX_ACL is not set 1253CONFIG_EXT3_FS_POSIX_ACL=y
1056# CONFIG_EXT3_FS_SECURITY is not set 1254CONFIG_EXT3_FS_SECURITY=y
1057# CONFIG_EXT4DEV_FS is not set 1255CONFIG_EXT4_FS=y
1058CONFIG_JBD=y 1256CONFIG_EXT4_FS_XATTR=y
1257CONFIG_EXT4_FS_POSIX_ACL=y
1258CONFIG_EXT4_FS_SECURITY=y
1259# CONFIG_EXT4_DEBUG is not set
1260CONFIG_FS_XIP=y
1261CONFIG_JBD=m
1262CONFIG_JBD2=y
1059CONFIG_FS_MBCACHE=y 1263CONFIG_FS_MBCACHE=y
1060# CONFIG_REISERFS_FS is not set 1264# CONFIG_REISERFS_FS is not set
1061# CONFIG_JFS_FS is not set 1265# CONFIG_JFS_FS is not set
1062# CONFIG_FS_POSIX_ACL is not set 1266CONFIG_FS_POSIX_ACL=y
1063# CONFIG_XFS_FS is not set 1267# CONFIG_XFS_FS is not set
1064# CONFIG_GFS2_FS is not set 1268# CONFIG_GFS2_FS is not set
1065# CONFIG_OCFS2_FS is not set 1269# CONFIG_OCFS2_FS is not set
1270# CONFIG_BTRFS_FS is not set
1271# CONFIG_NILFS2_FS is not set
1272CONFIG_FILE_LOCKING=y
1273CONFIG_FSNOTIFY=y
1066CONFIG_DNOTIFY=y 1274CONFIG_DNOTIFY=y
1067CONFIG_INOTIFY=y 1275CONFIG_INOTIFY=y
1068CONFIG_INOTIFY_USER=y 1276CONFIG_INOTIFY_USER=y
1069CONFIG_QUOTA=y 1277CONFIG_QUOTA=y
1070CONFIG_QUOTA_NETLINK_INTERFACE=y 1278CONFIG_QUOTA_NETLINK_INTERFACE=y
1071# CONFIG_PRINT_QUOTA_WARNING is not set 1279# CONFIG_PRINT_QUOTA_WARNING is not set
1280CONFIG_QUOTA_TREE=m
1072# CONFIG_QFMT_V1 is not set 1281# CONFIG_QFMT_V1 is not set
1073CONFIG_QFMT_V2=m 1282CONFIG_QFMT_V2=m
1074CONFIG_QUOTACTL=y 1283CONFIG_QUOTACTL=y
1075CONFIG_AUTOFS_FS=m 1284CONFIG_AUTOFS_FS=m
1076CONFIG_AUTOFS4_FS=m 1285CONFIG_AUTOFS4_FS=m
1077CONFIG_FUSE_FS=m 1286CONFIG_FUSE_FS=m
1287# CONFIG_CUSE is not set
1288
1289#
1290# Caches
1291#
1292# CONFIG_FSCACHE is not set
1078 1293
1079# 1294#
1080# CD-ROM/DVD Filesystems 1295# CD-ROM/DVD Filesystems
@@ -1103,15 +1318,13 @@ CONFIG_NTFS_RW=y
1103CONFIG_PROC_FS=y 1318CONFIG_PROC_FS=y
1104CONFIG_PROC_KCORE=y 1319CONFIG_PROC_KCORE=y
1105CONFIG_PROC_SYSCTL=y 1320CONFIG_PROC_SYSCTL=y
1321CONFIG_PROC_PAGE_MONITOR=y
1106CONFIG_SYSFS=y 1322CONFIG_SYSFS=y
1107CONFIG_TMPFS=y 1323CONFIG_TMPFS=y
1108# CONFIG_TMPFS_POSIX_ACL is not set 1324# CONFIG_TMPFS_POSIX_ACL is not set
1109# CONFIG_HUGETLB_PAGE is not set 1325# CONFIG_HUGETLB_PAGE is not set
1110CONFIG_CONFIGFS_FS=m 1326CONFIG_CONFIGFS_FS=m
1111 1327CONFIG_MISC_FILESYSTEMS=y
1112#
1113# Miscellaneous filesystems
1114#
1115# CONFIG_ADFS_FS is not set 1328# CONFIG_ADFS_FS is not set
1116# CONFIG_AFFS_FS is not set 1329# CONFIG_AFFS_FS is not set
1117# CONFIG_ECRYPT_FS is not set 1330# CONFIG_ECRYPT_FS is not set
@@ -1120,9 +1333,12 @@ CONFIG_CONFIGFS_FS=m
1120# CONFIG_BEFS_FS is not set 1333# CONFIG_BEFS_FS is not set
1121# CONFIG_BFS_FS is not set 1334# CONFIG_BFS_FS is not set
1122# CONFIG_EFS_FS is not set 1335# CONFIG_EFS_FS is not set
1336# CONFIG_LOGFS is not set
1123# CONFIG_CRAMFS is not set 1337# CONFIG_CRAMFS is not set
1338# CONFIG_SQUASHFS is not set
1124# CONFIG_VXFS_FS is not set 1339# CONFIG_VXFS_FS is not set
1125# CONFIG_MINIX_FS is not set 1340# CONFIG_MINIX_FS is not set
1341# CONFIG_OMFS_FS is not set
1126# CONFIG_HPFS_FS is not set 1342# CONFIG_HPFS_FS is not set
1127# CONFIG_QNX4FS_FS is not set 1343# CONFIG_QNX4FS_FS is not set
1128# CONFIG_ROMFS_FS is not set 1344# CONFIG_ROMFS_FS is not set
@@ -1133,16 +1349,17 @@ CONFIG_NFS_FS=y
1133CONFIG_NFS_V3=y 1349CONFIG_NFS_V3=y
1134# CONFIG_NFS_V3_ACL is not set 1350# CONFIG_NFS_V3_ACL is not set
1135# CONFIG_NFS_V4 is not set 1351# CONFIG_NFS_V4 is not set
1136# CONFIG_NFSD is not set
1137CONFIG_ROOT_NFS=y 1352CONFIG_ROOT_NFS=y
1353# CONFIG_NFSD is not set
1138CONFIG_LOCKD=y 1354CONFIG_LOCKD=y
1139CONFIG_LOCKD_V4=y 1355CONFIG_LOCKD_V4=y
1140CONFIG_NFS_COMMON=y 1356CONFIG_NFS_COMMON=y
1141CONFIG_SUNRPC=y 1357CONFIG_SUNRPC=y
1142# CONFIG_SUNRPC_BIND34 is not set 1358CONFIG_SUNRPC_GSS=m
1143# CONFIG_RPCSEC_GSS_KRB5 is not set 1359CONFIG_RPCSEC_GSS_KRB5=m
1144# CONFIG_RPCSEC_GSS_SPKM3 is not set 1360CONFIG_RPCSEC_GSS_SPKM3=m
1145# CONFIG_SMB_FS is not set 1361# CONFIG_SMB_FS is not set
1362# CONFIG_CEPH_FS is not set
1146# CONFIG_CIFS is not set 1363# CONFIG_CIFS is not set
1147# CONFIG_NCP_FS is not set 1364# CONFIG_NCP_FS is not set
1148# CONFIG_CODA_FS is not set 1365# CONFIG_CODA_FS is not set
@@ -1205,12 +1422,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1205CONFIG_ENABLE_MUST_CHECK=y 1422CONFIG_ENABLE_MUST_CHECK=y
1206CONFIG_FRAME_WARN=2048 1423CONFIG_FRAME_WARN=2048
1207CONFIG_MAGIC_SYSRQ=y 1424CONFIG_MAGIC_SYSRQ=y
1425# CONFIG_STRIP_ASM_SYMS is not set
1208# CONFIG_UNUSED_SYMBOLS is not set 1426# CONFIG_UNUSED_SYMBOLS is not set
1209# CONFIG_DEBUG_FS is not set 1427# CONFIG_DEBUG_FS is not set
1210# CONFIG_HEADERS_CHECK is not set 1428# CONFIG_HEADERS_CHECK is not set
1211CONFIG_DEBUG_KERNEL=y 1429CONFIG_DEBUG_KERNEL=y
1212# CONFIG_DEBUG_SHIRQ is not set 1430# CONFIG_DEBUG_SHIRQ is not set
1213CONFIG_DETECT_SOFTLOCKUP=y 1431CONFIG_DETECT_SOFTLOCKUP=y
1432# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1433CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1434CONFIG_DETECT_HUNG_TASK=y
1435# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1436CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1214CONFIG_SCHED_DEBUG=y 1437CONFIG_SCHED_DEBUG=y
1215# CONFIG_SCHEDSTATS is not set 1438# CONFIG_SCHEDSTATS is not set
1216# CONFIG_TIMER_STATS is not set 1439# CONFIG_TIMER_STATS is not set
@@ -1219,23 +1442,53 @@ CONFIG_SCHED_DEBUG=y
1219# CONFIG_DEBUG_RT_MUTEXES is not set 1442# CONFIG_DEBUG_RT_MUTEXES is not set
1220# CONFIG_RT_MUTEX_TESTER is not set 1443# CONFIG_RT_MUTEX_TESTER is not set
1221# CONFIG_DEBUG_SPINLOCK is not set 1444# CONFIG_DEBUG_SPINLOCK is not set
1222CONFIG_DEBUG_MUTEXES=y 1445# CONFIG_DEBUG_MUTEXES is not set
1223# CONFIG_DEBUG_LOCK_ALLOC is not set 1446# CONFIG_DEBUG_LOCK_ALLOC is not set
1224# CONFIG_PROVE_LOCKING is not set 1447# CONFIG_PROVE_LOCKING is not set
1225# CONFIG_LOCK_STAT is not set 1448# CONFIG_LOCK_STAT is not set
1226# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1449CONFIG_DEBUG_SPINLOCK_SLEEP=y
1227# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1450# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1228# CONFIG_DEBUG_KOBJECT is not set 1451# CONFIG_DEBUG_KOBJECT is not set
1229# CONFIG_DEBUG_INFO is not set 1452# CONFIG_DEBUG_INFO is not set
1230# CONFIG_DEBUG_VM is not set 1453# CONFIG_DEBUG_VM is not set
1231# CONFIG_DEBUG_WRITECOUNT is not set 1454# CONFIG_DEBUG_WRITECOUNT is not set
1232# CONFIG_DEBUG_LIST is not set 1455CONFIG_DEBUG_MEMORY_INIT=y
1456CONFIG_DEBUG_LIST=y
1233# CONFIG_DEBUG_SG is not set 1457# CONFIG_DEBUG_SG is not set
1458# CONFIG_DEBUG_NOTIFIERS is not set
1459# CONFIG_DEBUG_CREDENTIALS is not set
1234# CONFIG_BOOT_PRINTK_DELAY is not set 1460# CONFIG_BOOT_PRINTK_DELAY is not set
1235# CONFIG_RCU_TORTURE_TEST is not set 1461# CONFIG_RCU_TORTURE_TEST is not set
1462CONFIG_RCU_CPU_STALL_DETECTOR=y
1236# CONFIG_BACKTRACE_SELF_TEST is not set 1463# CONFIG_BACKTRACE_SELF_TEST is not set
1464# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1465# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1237# CONFIG_FAULT_INJECTION is not set 1466# CONFIG_FAULT_INJECTION is not set
1467# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1468# CONFIG_PAGE_POISONING is not set
1469CONFIG_HAVE_FUNCTION_TRACER=y
1470CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1471CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
1472CONFIG_HAVE_DYNAMIC_FTRACE=y
1473CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1474CONFIG_TRACING_SUPPORT=y
1475CONFIG_FTRACE=y
1476# CONFIG_FUNCTION_TRACER is not set
1477# CONFIG_IRQSOFF_TRACER is not set
1478# CONFIG_SCHED_TRACER is not set
1479# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1480# CONFIG_BOOT_TRACER is not set
1481CONFIG_BRANCH_PROFILE_NONE=y
1482# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1483# CONFIG_PROFILE_ALL_BRANCHES is not set
1484# CONFIG_STACK_TRACER is not set
1485# CONFIG_KMEMTRACE is not set
1486# CONFIG_WORKQUEUE_TRACER is not set
1487# CONFIG_BLK_DEV_IO_TRACE is not set
1238# CONFIG_SAMPLES is not set 1488# CONFIG_SAMPLES is not set
1489CONFIG_HAVE_ARCH_KGDB=y
1490# CONFIG_KGDB is not set
1491CONFIG_EARLY_PRINTK=y
1239# CONFIG_CMDLINE_BOOL is not set 1492# CONFIG_CMDLINE_BOOL is not set
1240# CONFIG_DEBUG_STACK_USAGE is not set 1493# CONFIG_DEBUG_STACK_USAGE is not set
1241# CONFIG_SB1XXX_CORELIS is not set 1494# CONFIG_SB1XXX_CORELIS is not set
@@ -1246,20 +1499,50 @@ CONFIG_DEBUG_MUTEXES=y
1246# 1499#
1247CONFIG_KEYS=y 1500CONFIG_KEYS=y
1248CONFIG_KEYS_DEBUG_PROC_KEYS=y 1501CONFIG_KEYS_DEBUG_PROC_KEYS=y
1249# CONFIG_SECURITY is not set 1502CONFIG_SECURITY=y
1250# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1503# CONFIG_SECURITYFS is not set
1504CONFIG_SECURITY_NETWORK=y
1505CONFIG_SECURITY_NETWORK_XFRM=y
1506# CONFIG_SECURITY_PATH is not set
1507CONFIG_LSM_MMAP_MIN_ADDR=65536
1508CONFIG_SECURITY_SELINUX=y
1509CONFIG_SECURITY_SELINUX_BOOTPARAM=y
1510CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
1511CONFIG_SECURITY_SELINUX_DISABLE=y
1512CONFIG_SECURITY_SELINUX_DEVELOP=y
1513CONFIG_SECURITY_SELINUX_AVC_STATS=y
1514CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
1515# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
1516# CONFIG_SECURITY_SMACK is not set
1517# CONFIG_SECURITY_TOMOYO is not set
1518# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1519# CONFIG_DEFAULT_SECURITY_SMACK is not set
1520# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1521CONFIG_DEFAULT_SECURITY_DAC=y
1522CONFIG_DEFAULT_SECURITY=""
1251CONFIG_CRYPTO=y 1523CONFIG_CRYPTO=y
1252 1524
1253# 1525#
1254# Crypto core or helper 1526# Crypto core or helper
1255# 1527#
1528# CONFIG_CRYPTO_FIPS is not set
1256CONFIG_CRYPTO_ALGAPI=y 1529CONFIG_CRYPTO_ALGAPI=y
1530CONFIG_CRYPTO_ALGAPI2=y
1257CONFIG_CRYPTO_AEAD=m 1531CONFIG_CRYPTO_AEAD=m
1532CONFIG_CRYPTO_AEAD2=y
1258CONFIG_CRYPTO_BLKCIPHER=y 1533CONFIG_CRYPTO_BLKCIPHER=y
1534CONFIG_CRYPTO_BLKCIPHER2=y
1259CONFIG_CRYPTO_HASH=y 1535CONFIG_CRYPTO_HASH=y
1536CONFIG_CRYPTO_HASH2=y
1537CONFIG_CRYPTO_RNG=m
1538CONFIG_CRYPTO_RNG2=y
1539CONFIG_CRYPTO_PCOMP=y
1260CONFIG_CRYPTO_MANAGER=y 1540CONFIG_CRYPTO_MANAGER=y
1541CONFIG_CRYPTO_MANAGER2=y
1261CONFIG_CRYPTO_GF128MUL=m 1542CONFIG_CRYPTO_GF128MUL=m
1262CONFIG_CRYPTO_NULL=y 1543CONFIG_CRYPTO_NULL=y
1544# CONFIG_CRYPTO_PCRYPT is not set
1545CONFIG_CRYPTO_WORKQUEUE=y
1263# CONFIG_CRYPTO_CRYPTD is not set 1546# CONFIG_CRYPTO_CRYPTD is not set
1264CONFIG_CRYPTO_AUTHENC=m 1547CONFIG_CRYPTO_AUTHENC=m
1265# CONFIG_CRYPTO_TEST is not set 1548# CONFIG_CRYPTO_TEST is not set
@@ -1276,7 +1559,7 @@ CONFIG_CRYPTO_SEQIV=m
1276# 1559#
1277CONFIG_CRYPTO_CBC=m 1560CONFIG_CRYPTO_CBC=m
1278CONFIG_CRYPTO_CTR=m 1561CONFIG_CRYPTO_CTR=m
1279# CONFIG_CRYPTO_CTS is not set 1562CONFIG_CRYPTO_CTS=m
1280CONFIG_CRYPTO_ECB=m 1563CONFIG_CRYPTO_ECB=m
1281CONFIG_CRYPTO_LRW=m 1564CONFIG_CRYPTO_LRW=m
1282CONFIG_CRYPTO_PCBC=m 1565CONFIG_CRYPTO_PCBC=m
@@ -1287,14 +1570,20 @@ CONFIG_CRYPTO_XTS=m
1287# 1570#
1288CONFIG_CRYPTO_HMAC=y 1571CONFIG_CRYPTO_HMAC=y
1289CONFIG_CRYPTO_XCBC=m 1572CONFIG_CRYPTO_XCBC=m
1573CONFIG_CRYPTO_VMAC=m
1290 1574
1291# 1575#
1292# Digest 1576# Digest
1293# 1577#
1294# CONFIG_CRYPTO_CRC32C is not set 1578CONFIG_CRYPTO_CRC32C=m
1579CONFIG_CRYPTO_GHASH=m
1295CONFIG_CRYPTO_MD4=m 1580CONFIG_CRYPTO_MD4=m
1296CONFIG_CRYPTO_MD5=y 1581CONFIG_CRYPTO_MD5=y
1297CONFIG_CRYPTO_MICHAEL_MIC=m 1582CONFIG_CRYPTO_MICHAEL_MIC=m
1583CONFIG_CRYPTO_RMD128=m
1584CONFIG_CRYPTO_RMD160=m
1585CONFIG_CRYPTO_RMD256=m
1586CONFIG_CRYPTO_RMD320=m
1298CONFIG_CRYPTO_SHA1=m 1587CONFIG_CRYPTO_SHA1=m
1299CONFIG_CRYPTO_SHA256=m 1588CONFIG_CRYPTO_SHA256=m
1300CONFIG_CRYPTO_SHA512=m 1589CONFIG_CRYPTO_SHA512=m
@@ -1325,25 +1614,36 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
1325# Compression 1614# Compression
1326# 1615#
1327CONFIG_CRYPTO_DEFLATE=m 1616CONFIG_CRYPTO_DEFLATE=m
1328# CONFIG_CRYPTO_LZO is not set 1617CONFIG_CRYPTO_ZLIB=m
1618CONFIG_CRYPTO_LZO=m
1619
1620#
1621# Random Number Generation
1622#
1623CONFIG_CRYPTO_ANSI_CPRNG=m
1329CONFIG_CRYPTO_HW=y 1624CONFIG_CRYPTO_HW=y
1330# CONFIG_CRYPTO_DEV_HIFN_795X is not set 1625# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1626# CONFIG_BINARY_PRINTF is not set
1331 1627
1332# 1628#
1333# Library routines 1629# Library routines
1334# 1630#
1335CONFIG_BITREVERSE=y 1631CONFIG_BITREVERSE=y
1336# CONFIG_GENERIC_FIND_FIRST_BIT is not set 1632CONFIG_GENERIC_FIND_LAST_BIT=y
1337CONFIG_CRC_CCITT=m 1633CONFIG_CRC_CCITT=m
1338# CONFIG_CRC16 is not set 1634CONFIG_CRC16=y
1635CONFIG_CRC_T10DIF=m
1339CONFIG_CRC_ITU_T=m 1636CONFIG_CRC_ITU_T=m
1340CONFIG_CRC32=y 1637CONFIG_CRC32=y
1341# CONFIG_CRC7 is not set 1638CONFIG_CRC7=m
1342CONFIG_LIBCRC32C=m 1639CONFIG_LIBCRC32C=m
1343CONFIG_AUDIT_GENERIC=y 1640CONFIG_AUDIT_GENERIC=y
1344CONFIG_ZLIB_INFLATE=m 1641CONFIG_ZLIB_INFLATE=y
1345CONFIG_ZLIB_DEFLATE=m 1642CONFIG_ZLIB_DEFLATE=m
1346CONFIG_PLIST=y 1643CONFIG_LZO_COMPRESS=m
1644CONFIG_LZO_DECOMPRESS=m
1645CONFIG_DECOMPRESS_GZIP=y
1347CONFIG_HAS_IOMEM=y 1646CONFIG_HAS_IOMEM=y
1348CONFIG_HAS_IOPORT=y 1647CONFIG_HAS_IOPORT=y
1349CONFIG_HAS_DMA=y 1648CONFIG_HAS_DMA=y
1649CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 1dd74fbdc09b..9252d9b50e59 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -13,12 +13,14 @@
13#include <asm/siginfo.h> 13#include <asm/siginfo.h>
14 14
15struct mips_abi { 15struct mips_abi {
16 int (* const setup_frame)(struct k_sigaction * ka, 16 int (* const setup_frame)(void *sig_return, struct k_sigaction *ka,
17 struct pt_regs *regs, int signr, 17 struct pt_regs *regs, int signr,
18 sigset_t *set); 18 sigset_t *set);
19 int (* const setup_rt_frame)(struct k_sigaction * ka, 19 const unsigned long signal_return_offset;
20 int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka,
20 struct pt_regs *regs, int signr, 21 struct pt_regs *regs, int signr,
21 sigset_t *set, siginfo_t *info); 22 sigset_t *set, siginfo_t *info);
23 const unsigned long rt_signal_return_offset;
22 const unsigned long restart; 24 const unsigned long restart;
23}; 25};
24 26
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index e53d7bed5cda..ea77a42c5f8c 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -310,6 +310,7 @@ do { \
310 310
311#endif /* CONFIG_64BIT */ 311#endif /* CONFIG_64BIT */
312 312
313struct pt_regs;
313struct task_struct; 314struct task_struct;
314 315
315extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); 316extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs);
@@ -367,4 +368,8 @@ extern const char *__elf_platform;
367#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 368#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
368#endif 369#endif
369 370
371#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
372struct linux_binprm;
373extern int arch_setup_additional_pages(struct linux_binprm *bprm,
374 int uses_interp);
370#endif /* _ASM_ELF_H */ 375#endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index aecada6f6117..3b4092705567 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -41,7 +41,11 @@ struct mips_fpu_emulator_stats {
41DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); 41DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
42 42
43#define MIPS_FPU_EMU_INC_STATS(M) \ 43#define MIPS_FPU_EMU_INC_STATS(M) \
44 cpu_local_wrap(__local_inc(&__get_cpu_var(fpuemustats).M)) 44do { \
45 preempt_disable(); \
46 __local_inc(&__get_cpu_var(fpuemustats).M); \
47 preempt_enable(); \
48} while (0)
45 49
46#else 50#else
47#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) 51#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index b12c4aca2cc9..96a2391ad85b 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -85,6 +85,7 @@ enum bcm63xx_regs_set {
85 RSET_TIMER, 85 RSET_TIMER,
86 RSET_WDT, 86 RSET_WDT,
87 RSET_UART0, 87 RSET_UART0,
88 RSET_UART1,
88 RSET_GPIO, 89 RSET_GPIO,
89 RSET_SPI, 90 RSET_SPI,
90 RSET_UDC0, 91 RSET_UDC0,
@@ -123,6 +124,7 @@ enum bcm63xx_regs_set {
123#define BCM_6338_TIMER_BASE (0xfffe0200) 124#define BCM_6338_TIMER_BASE (0xfffe0200)
124#define BCM_6338_WDT_BASE (0xfffe021c) 125#define BCM_6338_WDT_BASE (0xfffe021c)
125#define BCM_6338_UART0_BASE (0xfffe0300) 126#define BCM_6338_UART0_BASE (0xfffe0300)
127#define BCM_6338_UART1_BASE (0xdeadbeef)
126#define BCM_6338_GPIO_BASE (0xfffe0400) 128#define BCM_6338_GPIO_BASE (0xfffe0400)
127#define BCM_6338_SPI_BASE (0xfffe0c00) 129#define BCM_6338_SPI_BASE (0xfffe0c00)
128#define BCM_6338_UDC0_BASE (0xdeadbeef) 130#define BCM_6338_UDC0_BASE (0xdeadbeef)
@@ -153,6 +155,7 @@ enum bcm63xx_regs_set {
153#define BCM_6345_TIMER_BASE (0xfffe0200) 155#define BCM_6345_TIMER_BASE (0xfffe0200)
154#define BCM_6345_WDT_BASE (0xfffe021c) 156#define BCM_6345_WDT_BASE (0xfffe021c)
155#define BCM_6345_UART0_BASE (0xfffe0300) 157#define BCM_6345_UART0_BASE (0xfffe0300)
158#define BCM_6345_UART1_BASE (0xdeadbeef)
156#define BCM_6345_GPIO_BASE (0xfffe0400) 159#define BCM_6345_GPIO_BASE (0xfffe0400)
157#define BCM_6345_SPI_BASE (0xdeadbeef) 160#define BCM_6345_SPI_BASE (0xdeadbeef)
158#define BCM_6345_UDC0_BASE (0xdeadbeef) 161#define BCM_6345_UDC0_BASE (0xdeadbeef)
@@ -182,6 +185,7 @@ enum bcm63xx_regs_set {
182#define BCM_6348_TIMER_BASE (0xfffe0200) 185#define BCM_6348_TIMER_BASE (0xfffe0200)
183#define BCM_6348_WDT_BASE (0xfffe021c) 186#define BCM_6348_WDT_BASE (0xfffe021c)
184#define BCM_6348_UART0_BASE (0xfffe0300) 187#define BCM_6348_UART0_BASE (0xfffe0300)
188#define BCM_6348_UART1_BASE (0xdeadbeef)
185#define BCM_6348_GPIO_BASE (0xfffe0400) 189#define BCM_6348_GPIO_BASE (0xfffe0400)
186#define BCM_6348_SPI_BASE (0xfffe0c00) 190#define BCM_6348_SPI_BASE (0xfffe0c00)
187#define BCM_6348_UDC0_BASE (0xfffe1000) 191#define BCM_6348_UDC0_BASE (0xfffe1000)
@@ -208,6 +212,7 @@ enum bcm63xx_regs_set {
208#define BCM_6358_TIMER_BASE (0xfffe0040) 212#define BCM_6358_TIMER_BASE (0xfffe0040)
209#define BCM_6358_WDT_BASE (0xfffe005c) 213#define BCM_6358_WDT_BASE (0xfffe005c)
210#define BCM_6358_UART0_BASE (0xfffe0100) 214#define BCM_6358_UART0_BASE (0xfffe0100)
215#define BCM_6358_UART1_BASE (0xfffe0120)
211#define BCM_6358_GPIO_BASE (0xfffe0080) 216#define BCM_6358_GPIO_BASE (0xfffe0080)
212#define BCM_6358_SPI_BASE (0xdeadbeef) 217#define BCM_6358_SPI_BASE (0xdeadbeef)
213#define BCM_6358_UDC0_BASE (0xfffe0800) 218#define BCM_6358_UDC0_BASE (0xfffe0800)
@@ -246,6 +251,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
246 return BCM_6338_WDT_BASE; 251 return BCM_6338_WDT_BASE;
247 case RSET_UART0: 252 case RSET_UART0:
248 return BCM_6338_UART0_BASE; 253 return BCM_6338_UART0_BASE;
254 case RSET_UART1:
255 return BCM_6338_UART1_BASE;
249 case RSET_GPIO: 256 case RSET_GPIO:
250 return BCM_6338_GPIO_BASE; 257 return BCM_6338_GPIO_BASE;
251 case RSET_SPI: 258 case RSET_SPI:
@@ -292,6 +299,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
292 return BCM_6345_WDT_BASE; 299 return BCM_6345_WDT_BASE;
293 case RSET_UART0: 300 case RSET_UART0:
294 return BCM_6345_UART0_BASE; 301 return BCM_6345_UART0_BASE;
302 case RSET_UART1:
303 return BCM_6345_UART1_BASE;
295 case RSET_GPIO: 304 case RSET_GPIO:
296 return BCM_6345_GPIO_BASE; 305 return BCM_6345_GPIO_BASE;
297 case RSET_SPI: 306 case RSET_SPI:
@@ -338,6 +347,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
338 return BCM_6348_WDT_BASE; 347 return BCM_6348_WDT_BASE;
339 case RSET_UART0: 348 case RSET_UART0:
340 return BCM_6348_UART0_BASE; 349 return BCM_6348_UART0_BASE;
350 case RSET_UART1:
351 return BCM_6348_UART1_BASE;
341 case RSET_GPIO: 352 case RSET_GPIO:
342 return BCM_6348_GPIO_BASE; 353 return BCM_6348_GPIO_BASE;
343 case RSET_SPI: 354 case RSET_SPI:
@@ -384,6 +395,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
384 return BCM_6358_WDT_BASE; 395 return BCM_6358_WDT_BASE;
385 case RSET_UART0: 396 case RSET_UART0:
386 return BCM_6358_UART0_BASE; 397 return BCM_6358_UART0_BASE;
398 case RSET_UART1:
399 return BCM_6358_UART1_BASE;
387 case RSET_GPIO: 400 case RSET_GPIO:
388 return BCM_6358_GPIO_BASE; 401 return BCM_6358_GPIO_BASE;
389 case RSET_SPI: 402 case RSET_SPI:
@@ -429,6 +442,7 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
429enum bcm63xx_irq { 442enum bcm63xx_irq {
430 IRQ_TIMER = 0, 443 IRQ_TIMER = 0,
431 IRQ_UART0, 444 IRQ_UART0,
445 IRQ_UART1,
432 IRQ_DSL, 446 IRQ_DSL,
433 IRQ_ENET0, 447 IRQ_ENET0,
434 IRQ_ENET1, 448 IRQ_ENET1,
@@ -510,6 +524,7 @@ enum bcm63xx_irq {
510 */ 524 */
511#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) 525#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
512#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2) 526#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
527#define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
513#define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5) 528#define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5)
514#define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) 529#define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6)
515#define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) 530#define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
new file mode 100644
index 000000000000..23c705baf171
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
@@ -0,0 +1,6 @@
1#ifndef BCM63XX_DEV_UART_H_
2#define BCM63XX_DEV_UART_H_
3
4int bcm63xx_uart_register(unsigned int id);
5
6#endif /* BCM63XX_DEV_UART_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 76a0b7216af5..43d4da0b1e9f 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -10,6 +10,10 @@ static inline unsigned long bcm63xx_gpio_count(void)
10 switch (bcm63xx_get_cpu_id()) { 10 switch (bcm63xx_get_cpu_id()) {
11 case BCM6358_CPU_ID: 11 case BCM6358_CPU_ID:
12 return 40; 12 return 40;
13 case BCM6338_CPU_ID:
14 return 8;
15 case BCM6345_CPU_ID:
16 return 16;
13 case BCM6348_CPU_ID: 17 case BCM6348_CPU_ID:
14 default: 18 default:
15 return 37; 19 return 37;
diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
index 6479090a4106..474daaa53497 100644
--- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
+++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
@@ -45,6 +45,8 @@ struct board_info {
45 unsigned int has_ohci0:1; 45 unsigned int has_ohci0:1;
46 unsigned int has_ehci0:1; 46 unsigned int has_ehci0:1;
47 unsigned int has_dsp:1; 47 unsigned int has_dsp:1;
48 unsigned int has_uart0:1;
49 unsigned int has_uart1:1;
48 50
49 /* ethernet config */ 51 /* ethernet config */
50 struct bcm63xx_enet_platform_data enet0; 52 struct bcm63xx_enet_platform_data enet0;
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
index 71742bac940d..f453c01d0672 100644
--- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
@@ -24,7 +24,7 @@
24#define cpu_has_smartmips 0 24#define cpu_has_smartmips 0
25#define cpu_has_vtag_icache 0 25#define cpu_has_vtag_icache 0
26 26
27#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCMCPU_IS_6348) || defined(CONFIG_CPU_IS_6338) || defined(CONFIG_CPU_IS_BCM6345)) 27#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6345) || defined(CONFIG_BCM63XX_CPU_6338))
28#define cpu_has_dc_aliases 0 28#define cpu_has_dc_aliases 0
29#endif 29#endif
30 30
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 7950ef4f032c..743385d7b5f2 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -16,7 +16,11 @@
16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ 16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS) 17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
18 18
19#define BCM1250_M3_WAR 1 19#ifndef __ASSEMBLY__
20extern int sb1250_m3_workaround_needed(void);
21#endif
22
23#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
20#define SIBYTE_1956_WAR 1 24#define SIBYTE_1956_WAR 1
21 25
22#else 26#else
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 4063edd79623..c436138945a8 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,6 +1,9 @@
1#ifndef __ASM_MMU_H 1#ifndef __ASM_MMU_H
2#define __ASM_MMU_H 2#define __ASM_MMU_H
3 3
4typedef unsigned long mm_context_t[NR_CPUS]; 4typedef struct {
5 unsigned long asid[NR_CPUS];
6 void *vdso;
7} mm_context_t;
5 8
6#endif /* __ASM_MMU_H */ 9#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 145bb81ccaa5..d9592733a7ba 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -104,7 +104,7 @@ extern unsigned long smtc_asid_mask;
104 104
105#endif 105#endif
106 106
107#define cpu_context(cpu, mm) ((mm)->context[cpu]) 107#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
108#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 108#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
109#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 109#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
110 110
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ac32572430f4..a16beafcea91 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -188,8 +188,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
188#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 188#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
190 190
191#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) 191#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
192#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) 192 PHYS_OFFSET)
193#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \
194 PHYS_OFFSET)
193 195
194#include <asm-generic/memory_model.h> 196#include <asm-generic/memory_model.h>
195#include <asm-generic/getorder.h> 197#include <asm-generic/getorder.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 087a8884ef06..ab387910009a 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -33,13 +33,19 @@ extern void (*cpu_wait)(void);
33 33
34extern unsigned int vced_count, vcei_count; 34extern unsigned int vced_count, vcei_count;
35 35
36/*
37 * A special page (the vdso) is mapped into all processes at the very
38 * top of the virtual memory space.
39 */
40#define SPECIAL_PAGES_SIZE PAGE_SIZE
41
36#ifdef CONFIG_32BIT 42#ifdef CONFIG_32BIT
37/* 43/*
38 * User space process size: 2GB. This is hardcoded into a few places, 44 * User space process size: 2GB. This is hardcoded into a few places,
39 * so don't change it unless you know what you are doing. 45 * so don't change it unless you know what you are doing.
40 */ 46 */
41#define TASK_SIZE 0x7fff8000UL 47#define TASK_SIZE 0x7fff8000UL
42#define STACK_TOP TASK_SIZE 48#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
43 49
44/* 50/*
45 * This decides where the kernel will search for a free chunk of vm 51 * This decides where the kernel will search for a free chunk of vm
@@ -59,7 +65,8 @@ extern unsigned int vced_count, vcei_count;
59#define TASK_SIZE32 0x7fff8000UL 65#define TASK_SIZE32 0x7fff8000UL
60#define TASK_SIZE 0x10000000000UL 66#define TASK_SIZE 0x10000000000UL
61#define STACK_TOP \ 67#define STACK_TOP \
62 (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE) 68 (((test_thread_flag(TIF_32BIT_ADDR) ? \
69 TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE)
63 70
64/* 71/*
65 * This decides where the kernel will search for a free chunk of vm 72 * This decides where the kernel will search for a free chunk of vm
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 3b6da3330e32..c8419129e770 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -121,6 +121,25 @@
121 .endm 121 .endm
122#else 122#else
123 .macro get_saved_sp /* Uniprocessor variation */ 123 .macro get_saved_sp /* Uniprocessor variation */
124#ifdef CONFIG_CPU_LOONGSON2F
125 /*
126 * Clear BTB (branch target buffer), forbid RAS (return address
127 * stack) to workaround the Out-of-order Issue in Loongson2F
128 * via its diagnostic register.
129 */
130 move k0, ra
131 jal 1f
132 nop
1331: jal 1f
134 nop
1351: jal 1f
136 nop
1371: jal 1f
138 nop
1391: move ra, k0
140 li k0, 3
141 mtc0 k0, $22
142#endif /* CONFIG_CPU_LOONGSON2F */
124#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
125 lui k1, %hi(kernelsp) 144 lui k1, %hi(kernelsp)
126#else 145#else
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b99bd07e199b..11a8b5252549 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -84,6 +84,7 @@ Ip_u2s3u1(_lw);
84Ip_u1u2u3(_mfc0); 84Ip_u1u2u3(_mfc0);
85Ip_u1u2u3(_mtc0); 85Ip_u1u2u3(_mtc0);
86Ip_u2u1u3(_ori); 86Ip_u2u1u3(_ori);
87Ip_u3u1u2(_or);
87Ip_u2s3u1(_pref); 88Ip_u2s3u1(_pref);
88Ip_0(_rfe); 89Ip_0(_rfe);
89Ip_u2s3u1(_sc); 90Ip_u2s3u1(_sc);
@@ -102,6 +103,7 @@ Ip_0(_tlbwr);
102Ip_u3u1u2(_xor); 103Ip_u3u1u2(_xor);
103Ip_u2u1u3(_xori); 104Ip_u2u1u3(_xori);
104Ip_u2u1msbu3(_dins); 105Ip_u2u1msbu3(_dins);
106Ip_u1(_syscall);
105 107
106/* Handle labels. */ 108/* Handle labels. */
107struct uasm_label { 109struct uasm_label {
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
new file mode 100644
index 000000000000..cca56aa40ff4
--- /dev/null
+++ b/arch/mips/include/asm/vdso.h
@@ -0,0 +1,29 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#ifndef __ASM_VDSO_H
10#define __ASM_VDSO_H
11
12#include <linux/types.h>
13
14
15#ifdef CONFIG_32BIT
16struct mips_vdso {
17 u32 signal_trampoline[2];
18 u32 rt_signal_trampoline[2];
19};
20#else /* !CONFIG_32BIT */
21struct mips_vdso {
22 u32 o32_signal_trampoline[2];
23 u32 o32_rt_signal_trampoline[2];
24 u32 rt_signal_trampoline[2];
25 u32 n32_rt_signal_trampoline[2];
26};
27#endif /* CONFIG_32BIT */
28
29#endif /* __ASM_VDSO_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index ef20957ca14b..7a6ac501cbb5 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
6 6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
8 ptrace.o reset.o setup.o signal.o syscall.o \ 8 ptrace.o reset.o setup.o signal.o syscall.o \
9 time.o topology.o traps.o unaligned.o watch.o 9 time.o topology.o traps.o unaligned.o watch.o vdso.o
10 10
11ifdef CONFIG_FUNCTION_TRACER 11ifdef CONFIG_FUNCTION_TRACER
12CFLAGS_REMOVE_ftrace.o = -pg 12CFLAGS_REMOVE_ftrace.o = -pg
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
index d7ca256e33ef..cefc6e259baf 100644
--- a/arch/mips/kernel/cpufreq/loongson2_clock.c
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -164,3 +164,7 @@ void loongson2_cpu_wait(void)
164 spin_unlock_irqrestore(&loongson2_wait_lock, flags); 164 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
165} 165}
166EXPORT_SYMBOL_GPL(loongson2_cpu_wait); 166EXPORT_SYMBOL_GPL(loongson2_cpu_wait);
167
168MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
169MODULE_DESCRIPTION("cpufreq driver for Loongson 2F");
170MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 463b71b90a00..99960940d4a4 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -63,8 +63,13 @@ void __noreturn cpu_idle(void)
63 63
64 smtc_idle_loop_hook(); 64 smtc_idle_loop_hook();
65#endif 65#endif
66 if (cpu_wait) 66
67 if (cpu_wait) {
68 /* Don't trace irqs off for idle */
69 stop_critical_timings();
67 (*cpu_wait)(); 70 (*cpu_wait)();
71 start_critical_timings();
72 }
68 } 73 }
69#ifdef CONFIG_HOTPLUG_CPU 74#ifdef CONFIG_HOTPLUG_CPU
70 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && 75 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 6c8e8c4246f7..10263b405981 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -26,11 +26,6 @@
26 */ 26 */
27extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 27extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
28 size_t frame_size); 28 size_t frame_size);
29/*
30 * install trampoline code to get back from the sig handler
31 */
32extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
33
34/* Check and clear pending FPU exceptions in saved CSR */ 29/* Check and clear pending FPU exceptions in saved CSR */
35extern int fpcsr_pending(unsigned int __user *fpcsr); 30extern int fpcsr_pending(unsigned int __user *fpcsr);
36 31
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index d0c68b5d717b..2099d5a4c4b7 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -32,6 +32,7 @@
32#include <asm/ucontext.h> 32#include <asm/ucontext.h>
33#include <asm/cpu-features.h> 33#include <asm/cpu-features.h>
34#include <asm/war.h> 34#include <asm/war.h>
35#include <asm/vdso.h>
35 36
36#include "signal-common.h" 37#include "signal-common.h"
37 38
@@ -44,47 +45,20 @@ extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
44extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); 45extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
45extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); 46extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
46 47
47/*
48 * Horribly complicated - with the bloody RM9000 workarounds enabled
49 * the signal trampolines is moving to the end of the structure so we can
50 * increase the alignment without breaking software compatibility.
51 */
52#if ICACHE_REFILLS_WORKAROUND_WAR == 0
53
54struct sigframe { 48struct sigframe {
55 u32 sf_ass[4]; /* argument save space for o32 */ 49 u32 sf_ass[4]; /* argument save space for o32 */
56 u32 sf_code[2]; /* signal trampoline */ 50 u32 sf_pad[2]; /* Was: signal trampoline */
57 struct sigcontext sf_sc; 51 struct sigcontext sf_sc;
58 sigset_t sf_mask; 52 sigset_t sf_mask;
59}; 53};
60 54
61struct rt_sigframe { 55struct rt_sigframe {
62 u32 rs_ass[4]; /* argument save space for o32 */ 56 u32 rs_ass[4]; /* argument save space for o32 */
63 u32 rs_code[2]; /* signal trampoline */ 57 u32 rs_pad[2]; /* Was: signal trampoline */
64 struct siginfo rs_info; 58 struct siginfo rs_info;
65 struct ucontext rs_uc; 59 struct ucontext rs_uc;
66}; 60};
67 61
68#else
69
70struct sigframe {
71 u32 sf_ass[4]; /* argument save space for o32 */
72 u32 sf_pad[2];
73 struct sigcontext sf_sc; /* hw context */
74 sigset_t sf_mask;
75 u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
76};
77
78struct rt_sigframe {
79 u32 rs_ass[4]; /* argument save space for o32 */
80 u32 rs_pad[2];
81 struct siginfo rs_info;
82 struct ucontext rs_uc;
83 u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
84};
85
86#endif
87
88/* 62/*
89 * Helper routines 63 * Helper routines
90 */ 64 */
@@ -266,32 +240,6 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
266 return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); 240 return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
267} 241}
268 242
269int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
270{
271 int err;
272
273 /*
274 * Set up the return code ...
275 *
276 * li v0, __NR__foo_sigreturn
277 * syscall
278 */
279
280 err = __put_user(0x24020000 + syscall, tramp + 0);
281 err |= __put_user(0x0000000c , tramp + 1);
282 if (ICACHE_REFILLS_WORKAROUND_WAR) {
283 err |= __put_user(0, tramp + 2);
284 err |= __put_user(0, tramp + 3);
285 err |= __put_user(0, tramp + 4);
286 err |= __put_user(0, tramp + 5);
287 err |= __put_user(0, tramp + 6);
288 err |= __put_user(0, tramp + 7);
289 }
290 flush_cache_sigtramp((unsigned long) tramp);
291
292 return err;
293}
294
295/* 243/*
296 * Atomically swap in the new signal mask, and wait for a signal. 244 * Atomically swap in the new signal mask, and wait for a signal.
297 */ 245 */
@@ -484,8 +432,8 @@ badframe:
484} 432}
485 433
486#ifdef CONFIG_TRAD_SIGNALS 434#ifdef CONFIG_TRAD_SIGNALS
487static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, 435static int setup_frame(void *sig_return, struct k_sigaction *ka,
488 int signr, sigset_t *set) 436 struct pt_regs *regs, int signr, sigset_t *set)
489{ 437{
490 struct sigframe __user *frame; 438 struct sigframe __user *frame;
491 int err = 0; 439 int err = 0;
@@ -494,8 +442,6 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
494 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 442 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
495 goto give_sigsegv; 443 goto give_sigsegv;
496 444
497 err |= install_sigtramp(frame->sf_code, __NR_sigreturn);
498
499 err |= setup_sigcontext(regs, &frame->sf_sc); 445 err |= setup_sigcontext(regs, &frame->sf_sc);
500 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); 446 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
501 if (err) 447 if (err)
@@ -515,7 +461,7 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
515 regs->regs[ 5] = 0; 461 regs->regs[ 5] = 0;
516 regs->regs[ 6] = (unsigned long) &frame->sf_sc; 462 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
517 regs->regs[29] = (unsigned long) frame; 463 regs->regs[29] = (unsigned long) frame;
518 regs->regs[31] = (unsigned long) frame->sf_code; 464 regs->regs[31] = (unsigned long) sig_return;
519 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 465 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
520 466
521 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 467 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -529,8 +475,9 @@ give_sigsegv:
529} 475}
530#endif 476#endif
531 477
532static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, 478static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
533 int signr, sigset_t *set, siginfo_t *info) 479 struct pt_regs *regs, int signr, sigset_t *set,
480 siginfo_t *info)
534{ 481{
535 struct rt_sigframe __user *frame; 482 struct rt_sigframe __user *frame;
536 int err = 0; 483 int err = 0;
@@ -539,8 +486,6 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
539 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 486 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
540 goto give_sigsegv; 487 goto give_sigsegv;
541 488
542 err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
543
544 /* Create siginfo. */ 489 /* Create siginfo. */
545 err |= copy_siginfo_to_user(&frame->rs_info, info); 490 err |= copy_siginfo_to_user(&frame->rs_info, info);
546 491
@@ -573,7 +518,7 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
573 regs->regs[ 5] = (unsigned long) &frame->rs_info; 518 regs->regs[ 5] = (unsigned long) &frame->rs_info;
574 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 519 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
575 regs->regs[29] = (unsigned long) frame; 520 regs->regs[29] = (unsigned long) frame;
576 regs->regs[31] = (unsigned long) frame->rs_code; 521 regs->regs[31] = (unsigned long) sig_return;
577 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 522 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
578 523
579 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 524 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -590,8 +535,11 @@ give_sigsegv:
590struct mips_abi mips_abi = { 535struct mips_abi mips_abi = {
591#ifdef CONFIG_TRAD_SIGNALS 536#ifdef CONFIG_TRAD_SIGNALS
592 .setup_frame = setup_frame, 537 .setup_frame = setup_frame,
538 .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
593#endif 539#endif
594 .setup_rt_frame = setup_rt_frame, 540 .setup_rt_frame = setup_rt_frame,
541 .rt_signal_return_offset =
542 offsetof(struct mips_vdso, rt_signal_trampoline),
595 .restart = __NR_restart_syscall 543 .restart = __NR_restart_syscall
596}; 544};
597 545
@@ -599,6 +547,8 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
599 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) 547 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
600{ 548{
601 int ret; 549 int ret;
550 struct mips_abi *abi = current->thread.abi;
551 void *vdso = current->mm->context.vdso;
602 552
603 switch(regs->regs[0]) { 553 switch(regs->regs[0]) {
604 case ERESTART_RESTARTBLOCK: 554 case ERESTART_RESTARTBLOCK:
@@ -619,9 +569,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
619 regs->regs[0] = 0; /* Don't deal with this again. */ 569 regs->regs[0] = 0; /* Don't deal with this again. */
620 570
621 if (sig_uses_siginfo(ka)) 571 if (sig_uses_siginfo(ka))
622 ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); 572 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
573 ka, regs, sig, oldset, info);
623 else 574 else
624 ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); 575 ret = abi->setup_frame(vdso + abi->signal_return_offset,
576 ka, regs, sig, oldset);
625 577
626 spin_lock_irq(&current->sighand->siglock); 578 spin_lock_irq(&current->sighand->siglock);
627 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 579 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 03abaf048f09..a0ed0e052b2e 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -32,6 +32,7 @@
32#include <asm/system.h> 32#include <asm/system.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/war.h> 34#include <asm/war.h>
35#include <asm/vdso.h>
35 36
36#include "signal-common.h" 37#include "signal-common.h"
37 38
@@ -47,8 +48,6 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user
47/* 48/*
48 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 49 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
49 */ 50 */
50#define __NR_O32_sigreturn 4119
51#define __NR_O32_rt_sigreturn 4193
52#define __NR_O32_restart_syscall 4253 51#define __NR_O32_restart_syscall 4253
53 52
54/* 32-bit compatibility types */ 53/* 32-bit compatibility types */
@@ -77,47 +76,20 @@ struct ucontext32 {
77 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 76 compat_sigset_t uc_sigmask; /* mask last for extensibility */
78}; 77};
79 78
80/*
81 * Horribly complicated - with the bloody RM9000 workarounds enabled
82 * the signal trampolines is moving to the end of the structure so we can
83 * increase the alignment without breaking software compatibility.
84 */
85#if ICACHE_REFILLS_WORKAROUND_WAR == 0
86
87struct sigframe32 { 79struct sigframe32 {
88 u32 sf_ass[4]; /* argument save space for o32 */ 80 u32 sf_ass[4]; /* argument save space for o32 */
89 u32 sf_code[2]; /* signal trampoline */ 81 u32 sf_pad[2]; /* Was: signal trampoline */
90 struct sigcontext32 sf_sc; 82 struct sigcontext32 sf_sc;
91 compat_sigset_t sf_mask; 83 compat_sigset_t sf_mask;
92}; 84};
93 85
94struct rt_sigframe32 { 86struct rt_sigframe32 {
95 u32 rs_ass[4]; /* argument save space for o32 */ 87 u32 rs_ass[4]; /* argument save space for o32 */
96 u32 rs_code[2]; /* signal trampoline */ 88 u32 rs_pad[2]; /* Was: signal trampoline */
97 compat_siginfo_t rs_info; 89 compat_siginfo_t rs_info;
98 struct ucontext32 rs_uc; 90 struct ucontext32 rs_uc;
99}; 91};
100 92
101#else /* ICACHE_REFILLS_WORKAROUND_WAR */
102
103struct sigframe32 {
104 u32 sf_ass[4]; /* argument save space for o32 */
105 u32 sf_pad[2];
106 struct sigcontext32 sf_sc; /* hw context */
107 compat_sigset_t sf_mask;
108 u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
109};
110
111struct rt_sigframe32 {
112 u32 rs_ass[4]; /* argument save space for o32 */
113 u32 rs_pad[2];
114 compat_siginfo_t rs_info;
115 struct ucontext32 rs_uc;
116 u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
117};
118
119#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
120
121/* 93/*
122 * sigcontext handlers 94 * sigcontext handlers
123 */ 95 */
@@ -598,8 +570,8 @@ badframe:
598 force_sig(SIGSEGV, current); 570 force_sig(SIGSEGV, current);
599} 571}
600 572
601static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, 573static int setup_frame_32(void *sig_return, struct k_sigaction *ka,
602 int signr, sigset_t *set) 574 struct pt_regs *regs, int signr, sigset_t *set)
603{ 575{
604 struct sigframe32 __user *frame; 576 struct sigframe32 __user *frame;
605 int err = 0; 577 int err = 0;
@@ -608,8 +580,6 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
608 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 580 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
609 goto give_sigsegv; 581 goto give_sigsegv;
610 582
611 err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn);
612
613 err |= setup_sigcontext32(regs, &frame->sf_sc); 583 err |= setup_sigcontext32(regs, &frame->sf_sc);
614 err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); 584 err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
615 585
@@ -630,7 +600,7 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
630 regs->regs[ 5] = 0; 600 regs->regs[ 5] = 0;
631 regs->regs[ 6] = (unsigned long) &frame->sf_sc; 601 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
632 regs->regs[29] = (unsigned long) frame; 602 regs->regs[29] = (unsigned long) frame;
633 regs->regs[31] = (unsigned long) frame->sf_code; 603 regs->regs[31] = (unsigned long) sig_return;
634 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 604 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
635 605
636 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 606 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -644,8 +614,9 @@ give_sigsegv:
644 return -EFAULT; 614 return -EFAULT;
645} 615}
646 616
647static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, 617static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
648 int signr, sigset_t *set, siginfo_t *info) 618 struct pt_regs *regs, int signr, sigset_t *set,
619 siginfo_t *info)
649{ 620{
650 struct rt_sigframe32 __user *frame; 621 struct rt_sigframe32 __user *frame;
651 int err = 0; 622 int err = 0;
@@ -655,8 +626,6 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
655 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 626 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
656 goto give_sigsegv; 627 goto give_sigsegv;
657 628
658 err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn);
659
660 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ 629 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
661 err |= copy_siginfo_to_user32(&frame->rs_info, info); 630 err |= copy_siginfo_to_user32(&frame->rs_info, info);
662 631
@@ -690,7 +659,7 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
690 regs->regs[ 5] = (unsigned long) &frame->rs_info; 659 regs->regs[ 5] = (unsigned long) &frame->rs_info;
691 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 660 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
692 regs->regs[29] = (unsigned long) frame; 661 regs->regs[29] = (unsigned long) frame;
693 regs->regs[31] = (unsigned long) frame->rs_code; 662 regs->regs[31] = (unsigned long) sig_return;
694 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 663 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
695 664
696 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 665 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -709,7 +678,11 @@ give_sigsegv:
709 */ 678 */
710struct mips_abi mips_abi_32 = { 679struct mips_abi mips_abi_32 = {
711 .setup_frame = setup_frame_32, 680 .setup_frame = setup_frame_32,
681 .signal_return_offset =
682 offsetof(struct mips_vdso, o32_signal_trampoline),
712 .setup_rt_frame = setup_rt_frame_32, 683 .setup_rt_frame = setup_rt_frame_32,
684 .rt_signal_return_offset =
685 offsetof(struct mips_vdso, o32_rt_signal_trampoline),
713 .restart = __NR_O32_restart_syscall 686 .restart = __NR_O32_restart_syscall
714}; 687};
715 688
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index bb277e82d421..2c5df818c65a 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -39,13 +39,13 @@
39#include <asm/fpu.h> 39#include <asm/fpu.h>
40#include <asm/cpu-features.h> 40#include <asm/cpu-features.h>
41#include <asm/war.h> 41#include <asm/war.h>
42#include <asm/vdso.h>
42 43
43#include "signal-common.h" 44#include "signal-common.h"
44 45
45/* 46/*
46 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 47 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
47 */ 48 */
48#define __NR_N32_rt_sigreturn 6211
49#define __NR_N32_restart_syscall 6214 49#define __NR_N32_restart_syscall 6214
50 50
51extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); 51extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
@@ -67,27 +67,13 @@ struct ucontextn32 {
67 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 67 compat_sigset_t uc_sigmask; /* mask last for extensibility */
68}; 68};
69 69
70#if ICACHE_REFILLS_WORKAROUND_WAR == 0
71
72struct rt_sigframe_n32 {
73 u32 rs_ass[4]; /* argument save space for o32 */
74 u32 rs_code[2]; /* signal trampoline */
75 struct compat_siginfo rs_info;
76 struct ucontextn32 rs_uc;
77};
78
79#else /* ICACHE_REFILLS_WORKAROUND_WAR */
80
81struct rt_sigframe_n32 { 70struct rt_sigframe_n32 {
82 u32 rs_ass[4]; /* argument save space for o32 */ 71 u32 rs_ass[4]; /* argument save space for o32 */
83 u32 rs_pad[2]; 72 u32 rs_pad[2]; /* Was: signal trampoline */
84 struct compat_siginfo rs_info; 73 struct compat_siginfo rs_info;
85 struct ucontextn32 rs_uc; 74 struct ucontextn32 rs_uc;
86 u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
87}; 75};
88 76
89#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
90
91extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); 77extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
92 78
93asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) 79asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
@@ -173,7 +159,7 @@ badframe:
173 force_sig(SIGSEGV, current); 159 force_sig(SIGSEGV, current);
174} 160}
175 161
176static int setup_rt_frame_n32(struct k_sigaction * ka, 162static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
177 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) 163 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
178{ 164{
179 struct rt_sigframe_n32 __user *frame; 165 struct rt_sigframe_n32 __user *frame;
@@ -184,8 +170,6 @@ static int setup_rt_frame_n32(struct k_sigaction * ka,
184 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 170 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
185 goto give_sigsegv; 171 goto give_sigsegv;
186 172
187 install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn);
188
189 /* Create siginfo. */ 173 /* Create siginfo. */
190 err |= copy_siginfo_to_user32(&frame->rs_info, info); 174 err |= copy_siginfo_to_user32(&frame->rs_info, info);
191 175
@@ -219,7 +203,7 @@ static int setup_rt_frame_n32(struct k_sigaction * ka,
219 regs->regs[ 5] = (unsigned long) &frame->rs_info; 203 regs->regs[ 5] = (unsigned long) &frame->rs_info;
220 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 204 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
221 regs->regs[29] = (unsigned long) frame; 205 regs->regs[29] = (unsigned long) frame;
222 regs->regs[31] = (unsigned long) frame->rs_code; 206 regs->regs[31] = (unsigned long) sig_return;
223 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 207 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
224 208
225 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 209 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -235,5 +219,7 @@ give_sigsegv:
235 219
236struct mips_abi mips_abi_n32 = { 220struct mips_abi mips_abi_n32 = {
237 .setup_rt_frame = setup_rt_frame_n32, 221 .setup_rt_frame = setup_rt_frame_n32,
222 .rt_signal_return_offset =
223 offsetof(struct mips_vdso, n32_rt_signal_trampoline),
238 .restart = __NR_N32_restart_syscall 224 .restart = __NR_N32_restart_syscall
239}; 225};
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 25e825aea327..a95dea5459c4 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -182,7 +182,7 @@ static int vpemask[2][8] = {
182 {0, 0, 0, 0, 0, 0, 0, 1} 182 {0, 0, 0, 0, 0, 0, 0, 1}
183}; 183};
184int tcnoprog[NR_CPUS]; 184int tcnoprog[NR_CPUS];
185static atomic_t idle_hook_initialized = {0}; 185static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
186static int clock_hang_reported[NR_CPUS]; 186static int clock_hang_reported[NR_CPUS];
187 187
188#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 188#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 9587abc67f35..dd81b0f87518 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -79,7 +79,11 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
79 int do_color_align; 79 int do_color_align;
80 unsigned long task_size; 80 unsigned long task_size;
81 81
82 task_size = STACK_TOP; 82#ifdef CONFIG_32BIT
83 task_size = TASK_SIZE;
84#else /* Must be CONFIG_64BIT*/
85 task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
86#endif
83 87
84 if (len > task_size) 88 if (len > task_size)
85 return -ENOMEM; 89 return -ENOMEM;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4e00f9bc23ee..1a4dd657ccb9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1599,7 +1599,7 @@ void __init trap_init(void)
1599 ebase = (unsigned long) 1599 ebase = (unsigned long)
1600 __alloc_bootmem(size, 1 << fls(size), 0); 1600 __alloc_bootmem(size, 1 << fls(size), 0);
1601 } else { 1601 } else {
1602 ebase = CAC_BASE; 1602 ebase = CKSEG0;
1603 if (cpu_has_mips_r2) 1603 if (cpu_has_mips_r2)
1604 ebase += (read_c0_ebase() & 0x3ffff000); 1604 ebase += (read_c0_ebase() & 0x3ffff000);
1605 } 1605 }
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
new file mode 100644
index 000000000000..b773c1112b14
--- /dev/null
+++ b/arch/mips/kernel/vdso.c
@@ -0,0 +1,112 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009, 2010 Cavium Networks, Inc.
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/err.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/binfmts.h>
16#include <linux/elf.h>
17#include <linux/vmalloc.h>
18#include <linux/unistd.h>
19
20#include <asm/vdso.h>
21#include <asm/uasm.h>
22
23/*
24 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
25 */
26#define __NR_O32_sigreturn 4119
27#define __NR_O32_rt_sigreturn 4193
28#define __NR_N32_rt_sigreturn 6211
29
30static struct page *vdso_page;
31
32static void __init install_trampoline(u32 *tramp, unsigned int sigreturn)
33{
34 uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */
35 uasm_i_syscall(&tramp, 0);
36}
37
38static int __init init_vdso(void)
39{
40 struct mips_vdso *vdso;
41
42 vdso_page = alloc_page(GFP_KERNEL);
43 if (!vdso_page)
44 panic("Cannot allocate vdso");
45
46 vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
47 if (!vdso)
48 panic("Cannot map vdso");
49 clear_page(vdso);
50
51 install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
52#ifdef CONFIG_32BIT
53 install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
54#else
55 install_trampoline(vdso->n32_rt_signal_trampoline,
56 __NR_N32_rt_sigreturn);
57 install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
58 install_trampoline(vdso->o32_rt_signal_trampoline,
59 __NR_O32_rt_sigreturn);
60#endif
61
62 vunmap(vdso);
63
64 pr_notice("init_vdso successfull\n");
65
66 return 0;
67}
68device_initcall(init_vdso);
69
70static unsigned long vdso_addr(unsigned long start)
71{
72 return STACK_TOP;
73}
74
75int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
76{
77 int ret;
78 unsigned long addr;
79 struct mm_struct *mm = current->mm;
80
81 down_write(&mm->mmap_sem);
82
83 addr = vdso_addr(mm->start_stack);
84
85 addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0);
86 if (IS_ERR_VALUE(addr)) {
87 ret = addr;
88 goto up_fail;
89 }
90
91 ret = install_special_mapping(mm, addr, PAGE_SIZE,
92 VM_READ|VM_EXEC|
93 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
94 VM_ALWAYSDUMP,
95 &vdso_page);
96
97 if (ret)
98 goto up_fail;
99
100 mm->context.vdso = (void *)addr;
101
102up_fail:
103 up_write(&mm->mmap_sem);
104 return ret;
105}
106
107const char *arch_vma_name(struct vm_area_struct *vma)
108{
109 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
110 return "[vdso]";
111 return NULL;
112}
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 6b3b1de9dcae..5995969e8c42 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__delay);
41 41
42void __udelay(unsigned long us) 42void __udelay(unsigned long us)
43{ 43{
44 unsigned int lpj = current_cpu_data.udelay_val; 44 unsigned int lpj = raw_current_cpu_data.udelay_val;
45 45
46 __delay((us * 0x000010c7ull * HZ * lpj) >> 32); 46 __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
47} 47}
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(__udelay);
49 49
50void __ndelay(unsigned long ns) 50void __ndelay(unsigned long ns)
51{ 51{
52 unsigned int lpj = current_cpu_data.udelay_val; 52 unsigned int lpj = raw_current_cpu_data.udelay_val;
53 53
54 __delay((ns * 0x00000005ull * HZ * lpj) >> 32); 54 __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
55} 55}
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 3f19d1c5d942..05909d58e2fe 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -17,8 +17,7 @@ struct DWstruct {
17#error I feel sick. 17#error I feel sick.
18#endif 18#endif
19 19
20typedef union 20typedef union {
21{
22 struct DWstruct s; 21 struct DWstruct s;
23 long long ll; 22 long long ll;
24} DWunion; 23} DWunion;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index be8627bc5b02..12af739048fa 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
133} 133}
134 134
135unsigned long _page_cachable_default; 135unsigned long _page_cachable_default;
136EXPORT_SYMBOL_GPL(_page_cachable_default); 136EXPORT_SYMBOL(_page_cachable_default);
137 137
138static inline void setup_protection_map(void) 138static inline void setup_protection_map(void)
139{ 139{
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0de0e4127d66..d1f68aadbc4c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -788,10 +788,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
788 * create the plain linear handler 788 * create the plain linear handler
789 */ 789 */
790 if (bcm1250_m3_war()) { 790 if (bcm1250_m3_war()) {
791 UASM_i_MFC0(&p, K0, C0_BADVADDR); 791 unsigned int segbits = 44;
792 UASM_i_MFC0(&p, K1, C0_ENTRYHI); 792
793 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
794 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
793 uasm_i_xor(&p, K0, K0, K1); 795 uasm_i_xor(&p, K0, K0, K1);
794 UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 796 uasm_i_dsrl32(&p, K1, K0, 62 - 32);
797 uasm_i_dsrl(&p, K0, K0, 12 + 1);
798 uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
799 uasm_i_or(&p, K0, K0, K1);
795 uasm_il_bnez(&p, &r, K0, label_leave); 800 uasm_il_bnez(&p, &r, K0, label_leave);
796 /* No need for uasm_i_nop */ 801 /* No need for uasm_i_nop */
797 } 802 }
@@ -1312,10 +1317,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
1312 memset(relocs, 0, sizeof(relocs)); 1317 memset(relocs, 0, sizeof(relocs));
1313 1318
1314 if (bcm1250_m3_war()) { 1319 if (bcm1250_m3_war()) {
1315 UASM_i_MFC0(&p, K0, C0_BADVADDR); 1320 unsigned int segbits = 44;
1316 UASM_i_MFC0(&p, K1, C0_ENTRYHI); 1321
1322 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1323 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1317 uasm_i_xor(&p, K0, K0, K1); 1324 uasm_i_xor(&p, K0, K0, K1);
1318 UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 1325 uasm_i_dsrl32(&p, K1, K0, 62 - 32);
1326 uasm_i_dsrl(&p, K0, K0, 12 + 1);
1327 uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
1328 uasm_i_or(&p, K0, K0, K1);
1319 uasm_il_bnez(&p, &r, K0, label_leave); 1329 uasm_il_bnez(&p, &r, K0, label_leave);
1320 /* No need for uasm_i_nop */ 1330 /* No need for uasm_i_nop */
1321 } 1331 }
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 1581e9852461..611d564fdcf1 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -31,7 +31,8 @@ enum fields {
31 BIMM = 0x040, 31 BIMM = 0x040,
32 JIMM = 0x080, 32 JIMM = 0x080,
33 FUNC = 0x100, 33 FUNC = 0x100,
34 SET = 0x200 34 SET = 0x200,
35 SCIMM = 0x400
35}; 36};
36 37
37#define OP_MASK 0x3f 38#define OP_MASK 0x3f
@@ -52,6 +53,8 @@ enum fields {
52#define FUNC_SH 0 53#define FUNC_SH 0
53#define SET_MASK 0x7 54#define SET_MASK 0x7
54#define SET_SH 0 55#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
55 58
56enum opcode { 59enum opcode {
57 insn_invalid, 60 insn_invalid,
@@ -61,10 +64,10 @@ enum opcode {
61 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, 64 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
62 insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal, 65 insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
63 insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, 66 insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
64 insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, 67 insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
65 insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, 68 insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
66 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, 69 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
67 insn_dins 70 insn_dins, insn_syscall
68}; 71};
69 72
70struct insn { 73struct insn {
@@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
117 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 120 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
118 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 121 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
119 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 122 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
123 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
120 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 124 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
121 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 125 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
122 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 126 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = {
136 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 140 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
137 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 141 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
138 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, 142 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
143 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
139 { insn_invalid, 0, 0 } 144 { insn_invalid, 0, 0 }
140}; 145};
141 146
@@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
208 return (arg >> 2) & JIMM_MASK; 213 return (arg >> 2) & JIMM_MASK;
209} 214}
210 215
216static inline __cpuinit u32 build_scimm(u32 arg)
217{
218 if (arg & ~SCIMM_MASK)
219 printk(KERN_WARNING "Micro-assembler field overflow\n");
220
221 return (arg & SCIMM_MASK) << SCIMM_SH;
222}
223
211static inline __cpuinit u32 build_func(u32 arg) 224static inline __cpuinit u32 build_func(u32 arg)
212{ 225{
213 if (arg & ~FUNC_MASK) 226 if (arg & ~FUNC_MASK)
@@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
266 op |= build_func(va_arg(ap, u32)); 279 op |= build_func(va_arg(ap, u32));
267 if (ip->fields & SET) 280 if (ip->fields & SET)
268 op |= build_set(va_arg(ap, u32)); 281 op |= build_set(va_arg(ap, u32));
282 if (ip->fields & SCIMM)
283 op |= build_scimm(va_arg(ap, u32));
269 va_end(ap); 284 va_end(ap);
270 285
271 **buf = op; 286 **buf = op;
@@ -373,6 +388,7 @@ I_u2s3u1(_lw)
373I_u1u2u3(_mfc0) 388I_u1u2u3(_mfc0)
374I_u1u2u3(_mtc0) 389I_u1u2u3(_mtc0)
375I_u2u1u3(_ori) 390I_u2u1u3(_ori)
391I_u3u1u2(_or)
376I_u2s3u1(_pref) 392I_u2s3u1(_pref)
377I_0(_rfe) 393I_0(_rfe)
378I_u2s3u1(_sc) 394I_u2s3u1(_sc)
@@ -391,6 +407,7 @@ I_0(_tlbwr)
391I_u3u1u2(_xor) 407I_u3u1u2(_xor)
392I_u2u1u3(_xori) 408I_u2u1u3(_xori)
393I_u2u1msbu3(_dins); 409I_u2u1msbu3(_dins);
410I_u1(_syscall);
394 411
395/* Handle labels. */ 412/* Handle labels. */
396void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 413void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index 2bb4057bf6c7..d657ee0bc131 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -180,15 +180,21 @@ struct pci_ops loongson_pci_ops = {
180}; 180};
181 181
182#ifdef CONFIG_CS5536 182#ifdef CONFIG_CS5536
183DEFINE_RAW_SPINLOCK(msr_lock);
184
183void _rdmsr(u32 msr, u32 *hi, u32 *lo) 185void _rdmsr(u32 msr, u32 *hi, u32 *lo)
184{ 186{
185 struct pci_bus bus = { 187 struct pci_bus bus = {
186 .number = PCI_BUS_CS5536 188 .number = PCI_BUS_CS5536
187 }; 189 };
188 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); 190 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
191 unsigned long flags;
192
193 raw_spin_lock_irqsave(&msr_lock, flags);
189 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); 194 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
190 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); 195 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
191 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); 196 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
197 raw_spin_unlock_irqrestore(&msr_lock, flags);
192} 198}
193EXPORT_SYMBOL(_rdmsr); 199EXPORT_SYMBOL(_rdmsr);
194 200
@@ -198,9 +204,13 @@ void _wrmsr(u32 msr, u32 hi, u32 lo)
198 .number = PCI_BUS_CS5536 204 .number = PCI_BUS_CS5536
199 }; 205 };
200 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); 206 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
207 unsigned long flags;
208
209 raw_spin_lock_irqsave(&msr_lock, flags);
201 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); 210 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
202 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); 211 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
203 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); 212 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
213 raw_spin_unlock_irqrestore(&msr_lock, flags);
204} 214}
205EXPORT_SYMBOL(_wrmsr); 215EXPORT_SYMBOL(_wrmsr);
206#endif 216#endif
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 0444da1e23c2..92da3155ce07 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
87 return ret; 87 return ret;
88} 88}
89 89
90int sb1250_m3_workaround_needed(void)
91{
92 switch (soc_type) {
93 case K_SYS_SOC_TYPE_BCM1250:
94 case K_SYS_SOC_TYPE_BCM1250_ALT:
95 case K_SYS_SOC_TYPE_BCM1250_ALT2:
96 case K_SYS_SOC_TYPE_BCM1125:
97 case K_SYS_SOC_TYPE_BCM1125H:
98 return soc_pass < K_SYS_REVISION_BCM1250_C0;
99
100 default:
101 return 0;
102 }
103}
104
90static int __init setup_bcm112x(void) 105static int __init setup_bcm112x(void)
91{ 106{
92 int ret = 0; 107 int ret = 0;
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 54847fe1e564..097335262fb3 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -83,3 +83,57 @@ static int __init swarm_pata_init(void)
83device_initcall(swarm_pata_init); 83device_initcall(swarm_pata_init);
84 84
85#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */ 85#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
86
87#define sb1250_dev_struct(num) \
88 static struct resource sb1250_res##num = { \
89 .name = "SB1250 MAC " __stringify(num), \
90 .flags = IORESOURCE_MEM, \
91 .start = A_MAC_CHANNEL_BASE(num), \
92 .end = A_MAC_CHANNEL_BASE(num + 1) -1, \
93 };\
94 static struct platform_device sb1250_dev##num = { \
95 .name = "sb1250-mac", \
96 .id = num, \
97 .resource = &sb1250_res##num, \
98 .num_resources = 1, \
99 }
100
101sb1250_dev_struct(0);
102sb1250_dev_struct(1);
103sb1250_dev_struct(2);
104sb1250_dev_struct(3);
105
106static struct platform_device *sb1250_devs[] __initdata = {
107 &sb1250_dev0,
108 &sb1250_dev1,
109 &sb1250_dev2,
110 &sb1250_dev3,
111};
112
113static int __init sb1250_device_init(void)
114{
115 int ret;
116
117 /* Set the number of available units based on the SOC type. */
118 switch (soc_type) {
119 case K_SYS_SOC_TYPE_BCM1250:
120 case K_SYS_SOC_TYPE_BCM1250_ALT:
121 ret = platform_add_devices(sb1250_devs, 3);
122 break;
123 case K_SYS_SOC_TYPE_BCM1120:
124 case K_SYS_SOC_TYPE_BCM1125:
125 case K_SYS_SOC_TYPE_BCM1125H:
126 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
127 ret = platform_add_devices(sb1250_devs, 2);
128 break;
129 case K_SYS_SOC_TYPE_BCM1x55:
130 case K_SYS_SOC_TYPE_BCM1x80:
131 ret = platform_add_devices(sb1250_devs, 4);
132 break;
133 default:
134 ret = -ENODEV;
135 break;
136 }
137 return ret;
138}
139device_initcall(sb1250_device_init);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db513674050..9908d477ccd9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,9 @@ config SPARC64
37 def_bool 64BIT 37 def_bool 64BIT
38 select ARCH_SUPPORTS_MSI 38 select ARCH_SUPPORTS_MSI
39 select HAVE_FUNCTION_TRACER 39 select HAVE_FUNCTION_TRACER
40 select HAVE_FUNCTION_GRAPH_TRACER
41 select HAVE_FUNCTION_GRAPH_FP_TEST
42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
40 select HAVE_KRETPROBES 43 select HAVE_KRETPROBES
41 select HAVE_KPROBES 44 select HAVE_KPROBES
42 select HAVE_LMB 45 select HAVE_LMB
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889718ac..1b4a831565f9 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
19 bool "D-cache flush debugging" 19 bool "D-cache flush debugging"
20 depends on SPARC64 && DEBUG_KERNEL 20 depends on SPARC64 && DEBUG_KERNEL
21 21
22config STACK_DEBUG
23 bool "Stack Overflow Detection Support"
24
25config MCOUNT 22config MCOUNT
26 bool 23 bool
27 depends on SPARC64 24 depends on SPARC64
28 depends on STACK_DEBUG || FUNCTION_TRACER 25 depends on FUNCTION_TRACER
29 default y 26 default y
30 27
31config FRAME_POINTER 28config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d345ff..050ef35b9dcf 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@ typedef struct {
17 unsigned int __nmi_count; 17 unsigned int __nmi_count;
18 unsigned long clock_tick; /* %tick's per second */ 18 unsigned long clock_tick; /* %tick's per second */
19 unsigned long __pad; 19 unsigned long __pad;
20 unsigned int __pad1; 20 unsigned int irq0_irqs;
21 unsigned int __pad2; 21 unsigned int __pad2;
22 22
23 /* Dcache line 2, rarely used */ 23 /* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf920df3..bfa1ea45b4cd 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
76 */ 76 */
77static inline unsigned long __raw_local_irq_save(void) 77static inline unsigned long __raw_local_irq_save(void)
78{ 78{
79 unsigned long flags = __raw_local_save_flags(); 79 unsigned long flags, tmp;
80 80
81 raw_local_irq_disable(); 81 /* Disable interrupts to PIL_NORMAL_MAX unless we already
82 * are using PIL_NMI, in which case PIL_NMI is retained.
83 *
84 * The only values we ever program into the %pil are 0,
85 * PIL_NORMAL_MAX and PIL_NMI.
86 *
87 * Since PIL_NMI is the largest %pil value and all bits are
88 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
89 * actually is.
90 */
91 __asm__ __volatile__(
92 "rdpr %%pil, %0\n\t"
93 "or %0, %2, %1\n\t"
94 "wrpr %1, 0x0, %%pil"
95 : "=r" (flags), "=r" (tmp)
96 : "i" (PIL_NORMAL_MAX)
97 : "memory"
98 );
82 99
83 return flags; 100 return flags;
84} 101}
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d9447f2ad..4827a3aeac7f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
111#define THREAD_SHIFT PAGE_SHIFT 111#define THREAD_SHIFT PAGE_SHIFT
112#endif /* PAGE_SHIFT == 13 */ 112#endif /* PAGE_SHIFT == 13 */
113 113
114#define PREEMPT_ACTIVE 0x4000000 114#define PREEMPT_ACTIVE 0x10000000
115 115
116/* 116/*
117 * macros/functions for gaining access to the thread information structure 117 * macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c6316142db4e..0c2dc1f24a9a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -13,6 +13,14 @@ extra-y += init_task.o
13CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) 13CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
14extra-y += vmlinux.lds 14extra-y += vmlinux.lds
15 15
16ifdef CONFIG_FUNCTION_TRACER
17# Do not profile debug and lowlevel utilities
18CFLAGS_REMOVE_ftrace.o := -pg
19CFLAGS_REMOVE_time_$(BITS).o := -pg
20CFLAGS_REMOVE_perf_event.o := -pg
21CFLAGS_REMOVE_pcr.o := -pg
22endif
23
16obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o 24obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
17obj-$(CONFIG_SPARC32) += etrap_32.o 25obj-$(CONFIG_SPARC32) += etrap_32.o
18obj-$(CONFIG_SPARC32) += rtrap_32.o 26obj-$(CONFIG_SPARC32) += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
85 93
86 94
87obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 95obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
88CFLAGS_REMOVE_ftrace.o := -pg 96obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
89 97
90obj-$(CONFIG_EARLYFB) += btext.o 98obj-$(CONFIG_EARLYFB) += btext.o
91obj-$(CONFIG_STACKTRACE) += stacktrace.o 99obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56b39e8..03ab022e51c5 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
13 13
14static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) 14static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
15{ 15{
16 static u32 call; 16 u32 call;
17 s32 off; 17 s32 off;
18 18
19 off = ((s32)addr - (s32)ip); 19 off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
91 return 0; 91 return 0;
92} 92}
93#endif 93#endif
94
95#ifdef CONFIG_FUNCTION_GRAPH_TRACER
96
97#ifdef CONFIG_DYNAMIC_FTRACE
98extern void ftrace_graph_call(void);
99
100int ftrace_enable_ftrace_graph_caller(void)
101{
102 unsigned long ip = (unsigned long)(&ftrace_graph_call);
103 u32 old, new;
104
105 old = *(u32 *) &ftrace_graph_call;
106 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
107 return ftrace_modify_code(ip, old, new);
108}
109
110int ftrace_disable_ftrace_graph_caller(void)
111{
112 unsigned long ip = (unsigned long)(&ftrace_graph_call);
113 u32 old, new;
114
115 old = *(u32 *) &ftrace_graph_call;
116 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
117
118 return ftrace_modify_code(ip, old, new);
119}
120
121#endif /* !CONFIG_DYNAMIC_FTRACE */
122
123/*
124 * Hook the return address and push it in the stack of return addrs
125 * in current thread info.
126 */
127unsigned long prepare_ftrace_return(unsigned long parent,
128 unsigned long self_addr,
129 unsigned long frame_pointer)
130{
131 unsigned long return_hooker = (unsigned long) &return_to_handler;
132 struct ftrace_graph_ent trace;
133
134 if (unlikely(atomic_read(&current->tracing_graph_pause)))
135 return parent + 8UL;
136
137 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
138 frame_pointer) == -EBUSY)
139 return parent + 8UL;
140
141 trace.func = self_addr;
142
143 /* Only trace if the calling function expects to */
144 if (!ftrace_graph_entry(&trace)) {
145 current->curr_ret_stack--;
146 return parent + 8UL;
147 }
148
149 return return_hooker;
150}
151#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb94d97b..830d70a3e20b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,9 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/ftrace.h>
23#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/kmemleak.h>
24 26
25#include <asm/ptrace.h> 27#include <asm/ptrace.h>
26#include <asm/processor.h> 28#include <asm/processor.h>
@@ -45,6 +47,7 @@
45 47
46#include "entry.h" 48#include "entry.h"
47#include "cpumap.h" 49#include "cpumap.h"
50#include "kstack.h"
48 51
49#define NUM_IVECS (IMAP_INR + 1) 52#define NUM_IVECS (IMAP_INR + 1)
50 53
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
647 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); 650 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
648 if (unlikely(!bucket)) 651 if (unlikely(!bucket))
649 return 0; 652 return 0;
653
654 /* The only reference we store to the IRQ bucket is
655 * by physical address which kmemleak can't see, tell
656 * it that this object explicitly is not a leak and
657 * should be scanned.
658 */
659 kmemleak_not_leak(bucket);
660
650 __flush_dcache_range((unsigned long) bucket, 661 __flush_dcache_range((unsigned long) bucket,
651 ((unsigned long) bucket + 662 ((unsigned long) bucket +
652 sizeof(struct ino_bucket))); 663 sizeof(struct ino_bucket)));
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
703void *hardirq_stack[NR_CPUS]; 714void *hardirq_stack[NR_CPUS];
704void *softirq_stack[NR_CPUS]; 715void *softirq_stack[NR_CPUS];
705 716
706static __attribute__((always_inline)) void *set_hardirq_stack(void) 717void __irq_entry handler_irq(int irq, struct pt_regs *regs)
707{
708 void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
709
710 __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
711 if (orig_sp < sp ||
712 orig_sp > (sp + THREAD_SIZE)) {
713 sp += THREAD_SIZE - 192 - STACK_BIAS;
714 __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
715 }
716
717 return orig_sp;
718}
719static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
720{
721 __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
722}
723
724void handler_irq(int irq, struct pt_regs *regs)
725{ 718{
726 unsigned long pstate, bucket_pa; 719 unsigned long pstate, bucket_pa;
727 struct pt_regs *old_regs; 720 struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd490b59..0a2bd0f99fc1 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/kgdb.h> 6#include <linux/kgdb.h>
7#include <linux/kdebug.h> 7#include <linux/kdebug.h>
8#include <linux/ftrace.h>
8 9
9#include <asm/kdebug.h> 10#include <asm/kdebug.h>
10#include <asm/ptrace.h> 11#include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
108} 109}
109 110
110#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
111void smp_kgdb_capture_client(int irq, struct pt_regs *regs) 112void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
112{ 113{
113 unsigned long flags; 114 unsigned long flags;
114 115
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c03..53dfb92e09fb 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
61 61
62} 62}
63 63
64static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
65{
66 void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
67
68 __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
69 if (orig_sp < sp ||
70 orig_sp > (sp + THREAD_SIZE)) {
71 sp += THREAD_SIZE - 192 - STACK_BIAS;
72 __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
73 }
74
75 return orig_sp;
76}
77
78static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
79{
80 __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
81}
82
64#endif /* _KSTACK_H */ 83#endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62c7ea3..a4bd7ba74c89 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/pcr.h> 24#include <asm/pcr.h>
25 25
26#include "kstack.h"
27
26/* We don't have a real NMI on sparc64, but we can fake one 28/* We don't have a real NMI on sparc64, but we can fake one
27 * up using profiling counter overflow interrupts and interrupt 29 * up using profiling counter overflow interrupts and interrupt
28 * levels. 30 * levels.
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
92notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) 94notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
93{ 95{
94 unsigned int sum, touched = 0; 96 unsigned int sum, touched = 0;
95 int cpu = smp_processor_id(); 97 void *orig_sp;
96 98
97 clear_softint(1 << irq); 99 clear_softint(1 << irq);
98 100
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
100 102
101 nmi_enter(); 103 nmi_enter();
102 104
105 orig_sp = set_hardirq_stack();
106
103 if (notify_die(DIE_NMI, "nmi", regs, 0, 107 if (notify_die(DIE_NMI, "nmi", regs, 0,
104 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 108 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
105 touched = 1; 109 touched = 1;
106 else 110 else
107 pcr_ops->write(PCR_PIC_PRIV); 111 pcr_ops->write(PCR_PIC_PRIV);
108 112
109 sum = kstat_irqs_cpu(0, cpu); 113 sum = local_cpu_data().irq0_irqs;
110 if (__get_cpu_var(nmi_touch)) { 114 if (__get_cpu_var(nmi_touch)) {
111 __get_cpu_var(nmi_touch) = 0; 115 __get_cpu_var(nmi_touch) = 0;
112 touched = 1; 116 touched = 1;
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
125 pcr_ops->write(pcr_enable); 129 pcr_ops->write(pcr_enable);
126 } 130 }
127 131
132 restore_hardirq_stack(orig_sp);
133
128 nmi_exit(); 134 nmi_exit();
129} 135}
130 136
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658a927d..8a000583b5cf 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
371 struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); 371 struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
372 372
373 if (!rp) { 373 if (!rp) {
374 prom_printf("Cannot allocate IOMMU resource.\n"); 374 pr_info("%s: Cannot allocate IOMMU resource.\n",
375 prom_halt(); 375 pbm->name);
376 return;
376 } 377 }
377 rp->name = "IOMMU"; 378 rp->name = "IOMMU";
378 rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; 379 rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
379 rp->end = rp->start + (unsigned long) vdma[1] - 1UL; 380 rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
380 rp->flags = IORESOURCE_BUSY; 381 rp->flags = IORESOURCE_BUSY;
381 request_resource(&pbm->mem_space, rp); 382 if (request_resource(&pbm->mem_space, rp)) {
383 pr_info("%s: Unable to request IOMMU resource.\n",
384 pbm->name);
385 kfree(rp);
386 }
382 } 387 }
383} 388}
384 389
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a03af5..c4a6a50b4849 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
8#include <linux/irq.h> 8#include <linux/irq.h>
9 9
10#include <linux/perf_event.h> 10#include <linux/perf_event.h>
11#include <linux/ftrace.h>
11 12
12#include <asm/pil.h> 13#include <asm/pil.h>
13#include <asm/pcr.h> 14#include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
34 * Therefore in such situations we defer the work by signalling 35 * Therefore in such situations we defer the work by signalling
35 * a lower level cpu IRQ. 36 * a lower level cpu IRQ.
36 */ 37 */
37void deferred_pcr_work_irq(int irq, struct pt_regs *regs) 38void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
38{ 39{
39 struct pt_regs *old_regs; 40 struct pt_regs *old_regs;
40 41
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873c6c13..090b9e9ad5e3 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
130 nop 130 nop
131 call trace_hardirqs_on 131 call trace_hardirqs_on
132 nop 132 nop
133 wrpr %l4, %pil 133 /* Do not actually set the %pil here. We will do that
134 * below after we clear PSTATE_IE in the %pstate register.
135 * If we re-enable interrupts here, we can recurse down
136 * the hardirq stack potentially endlessly, causing a
137 * stack overflow.
138 *
139 * It is tempting to put this test and trace_hardirqs_on
140 * call at the 'rt_continue' label, but that will not work
141 * as that path hits unconditionally and we do not want to
142 * execute this in NMI return paths, for example.
143 */
134#endif 144#endif
135rtrap_no_irq_enable: 145rtrap_no_irq_enable:
136 andcc %l1, TSTATE_PRIV, %l3 146 andcc %l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c5334528109..b6a2b8f47040 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
22#include <linux/profile.h> 22#include <linux/profile.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
25#include <linux/ftrace.h>
25#include <linux/cpu.h> 26#include <linux/cpu.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
823 &cpumask_of_cpu(cpu)); 824 &cpumask_of_cpu(cpu));
824} 825}
825 826
826void smp_call_function_client(int irq, struct pt_regs *regs) 827void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
827{ 828{
828 clear_softint(1 << irq); 829 clear_softint(1 << irq);
829 generic_smp_call_function_interrupt(); 830 generic_smp_call_function_interrupt();
830} 831}
831 832
832void smp_call_function_single_client(int irq, struct pt_regs *regs) 833void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
833{ 834{
834 clear_softint(1 << irq); 835 clear_softint(1 << irq);
835 generic_smp_call_function_single_interrupt(); 836 generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
965 put_cpu(); 966 put_cpu();
966} 967}
967 968
968void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) 969void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
969{ 970{
970 struct mm_struct *mm; 971 struct mm_struct *mm;
971 unsigned long flags; 972 unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
1149 */ 1150 */
1150extern void prom_world(int); 1151extern void prom_world(int);
1151 1152
1152void smp_penguin_jailcell(int irq, struct pt_regs *regs) 1153void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1153{ 1154{
1154 clear_softint(1 << irq); 1155 clear_softint(1 << irq);
1155 1156
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
1365 &cpumask_of_cpu(cpu)); 1366 &cpumask_of_cpu(cpu));
1366} 1367}
1367 1368
1368void smp_receive_signal_client(int irq, struct pt_regs *regs) 1369void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1369{ 1370{
1370 clear_softint(1 << irq); 1371 clear_softint(1 << irq);
1371} 1372}
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e165102885..c7bbe6cf7b85 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
35#include <linux/clocksource.h> 35#include <linux/clocksource.h>
36#include <linux/of_device.h> 36#include <linux/of_device.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/ftrace.h>
38 39
39#include <asm/oplib.h> 40#include <asm/oplib.h>
40#include <asm/timer.h> 41#include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
717}; 718};
718static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); 719static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
719 720
720void timer_interrupt(int irq, struct pt_regs *regs) 721void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
721{ 722{
722 struct pt_regs *old_regs = set_irq_regs(regs); 723 struct pt_regs *old_regs = set_irq_regs(regs);
723 unsigned long tick_mask = tick_ops->softint_mask; 724 unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
728 729
729 irq_enter(); 730 irq_enter();
730 731
732 local_cpu_data().irq0_irqs++;
731 kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); 733 kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
732 734
733 if (unlikely(!evt->event_handler)) { 735 if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2390d6..9da57f032983 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2203,27 +2203,6 @@ void dump_stack(void)
2203 2203
2204EXPORT_SYMBOL(dump_stack); 2204EXPORT_SYMBOL(dump_stack);
2205 2205
2206static inline int is_kernel_stack(struct task_struct *task,
2207 struct reg_window *rw)
2208{
2209 unsigned long rw_addr = (unsigned long) rw;
2210 unsigned long thread_base, thread_end;
2211
2212 if (rw_addr < PAGE_OFFSET) {
2213 if (task != &init_task)
2214 return 0;
2215 }
2216
2217 thread_base = (unsigned long) task_stack_page(task);
2218 thread_end = thread_base + sizeof(union thread_union);
2219 if (rw_addr >= thread_base &&
2220 rw_addr < thread_end &&
2221 !(rw_addr & 0x7UL))
2222 return 1;
2223
2224 return 0;
2225}
2226
2227static inline struct reg_window *kernel_stack_up(struct reg_window *rw) 2206static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2228{ 2207{
2229 unsigned long fp = rw->ins[6]; 2208 unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2252 show_regs(regs); 2231 show_regs(regs);
2253 add_taint(TAINT_DIE); 2232 add_taint(TAINT_DIE);
2254 if (regs->tstate & TSTATE_PRIV) { 2233 if (regs->tstate & TSTATE_PRIV) {
2234 struct thread_info *tp = current_thread_info();
2255 struct reg_window *rw = (struct reg_window *) 2235 struct reg_window *rw = (struct reg_window *)
2256 (regs->u_regs[UREG_FP] + STACK_BIAS); 2236 (regs->u_regs[UREG_FP] + STACK_BIAS);
2257 2237
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2259 * find some badly aligned kernel stack. 2239 * find some badly aligned kernel stack.
2260 */ 2240 */
2261 while (rw && 2241 while (rw &&
2262 count++ < 30&& 2242 count++ < 30 &&
2263 is_kernel_stack(current, rw)) { 2243 kstack_valid(tp, (unsigned long) rw)) {
2264 printk("Caller[%016lx]: %pS\n", rw->ins[7], 2244 printk("Caller[%016lx]: %pS\n", rw->ins[7],
2265 (void *) rw->ins[7]); 2245 (void *) rw->ins[7]);
2266 2246
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce43018c49..c752c4c479bd 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
50} 50}
51 51
52/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ 52/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
53static inline int decode_access_size(unsigned int insn) 53static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
54{ 54{
55 unsigned int tmp; 55 unsigned int tmp;
56 56
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
66 return 2; 66 return 2;
67 else { 67 else {
68 printk("Impossible unaligned trap. insn=%08x\n", insn); 68 printk("Impossible unaligned trap. insn=%08x\n", insn);
69 die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); 69 die_if_kernel("Byte sized unaligned access?!?!", regs);
70 70
71 /* GCC should never warn that control reaches the end 71 /* GCC should never warn that control reaches the end
72 * of this function without returning a value because 72 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
286asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) 286asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
287{ 287{
288 enum direction dir = decode_direction(insn); 288 enum direction dir = decode_direction(insn);
289 int size = decode_access_size(insn); 289 int size = decode_access_size(regs, insn);
290 int orig_asi, asi; 290 int orig_asi, asi;
291 291
292 current_thread_info()->kern_una_regs = regs; 292 current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e5992593967..0c1e6783657f 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,11 +46,16 @@ SECTIONS
46 SCHED_TEXT 46 SCHED_TEXT
47 LOCK_TEXT 47 LOCK_TEXT
48 KPROBES_TEXT 48 KPROBES_TEXT
49 IRQENTRY_TEXT
49 *(.gnu.warning) 50 *(.gnu.warning)
50 } = 0 51 } = 0
51 _etext = .; 52 _etext = .;
52 53
53 RO_DATA(PAGE_SIZE) 54 RO_DATA(PAGE_SIZE)
55
56 /* Start of data section */
57 _sdata = .;
58
54 .data1 : { 59 .data1 : {
55 *(.data1) 60 *(.data1)
56 } 61 }
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12deed2..3ad6cbdc2163 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -7,26 +7,11 @@
7 7
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9 9
10#include <asm/ptrace.h>
11#include <asm/thread_info.h>
12
13/* 10/*
14 * This is the main variant and is called by C code. GCC's -pg option 11 * This is the main variant and is called by C code. GCC's -pg option
15 * automatically instruments every C function with a call to this. 12 * automatically instruments every C function with a call to this.
16 */ 13 */
17 14
18#ifdef CONFIG_STACK_DEBUG
19
20#define OVSTACKSIZE 4096 /* lets hope this is enough */
21
22 .data
23 .align 8
24panicstring:
25 .asciz "Stack overflow\n"
26 .align 8
27ovstack:
28 .skip OVSTACKSIZE
29#endif
30 .text 15 .text
31 .align 32 16 .align 32
32 .globl _mcount 17 .globl _mcount
@@ -35,84 +20,48 @@ ovstack:
35 .type mcount,#function 20 .type mcount,#function
36_mcount: 21_mcount:
37mcount: 22mcount:
38#ifdef CONFIG_STACK_DEBUG
39 /*
40 * Check whether %sp is dangerously low.
41 */
42 ldub [%g6 + TI_FPDEPTH], %g1
43 srl %g1, 1, %g3
44 add %g3, 1, %g3
45 sllx %g3, 8, %g3 ! each fpregs frame is 256b
46 add %g3, 192, %g3
47 add %g6, %g3, %g3 ! where does task_struct+frame end?
48 sub %g3, STACK_BIAS, %g3
49 cmp %sp, %g3
50 bg,pt %xcc, 1f
51 nop
52 lduh [%g6 + TI_CPU], %g1
53 sethi %hi(hardirq_stack), %g3
54 or %g3, %lo(hardirq_stack), %g3
55 sllx %g1, 3, %g1
56 ldx [%g3 + %g1], %g7
57 sub %g7, STACK_BIAS, %g7
58 cmp %sp, %g7
59 bleu,pt %xcc, 2f
60 sethi %hi(THREAD_SIZE), %g3
61 add %g7, %g3, %g7
62 cmp %sp, %g7
63 blu,pn %xcc, 1f
642: sethi %hi(softirq_stack), %g3
65 or %g3, %lo(softirq_stack), %g3
66 ldx [%g3 + %g1], %g7
67 sub %g7, STACK_BIAS, %g7
68 cmp %sp, %g7
69 bleu,pt %xcc, 3f
70 sethi %hi(THREAD_SIZE), %g3
71 add %g7, %g3, %g7
72 cmp %sp, %g7
73 blu,pn %xcc, 1f
74 nop
75 /* If we are already on ovstack, don't hop onto it
76 * again, we are already trying to output the stack overflow
77 * message.
78 */
793: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
80 or %g7, %lo(ovstack), %g7
81 add %g7, OVSTACKSIZE, %g3
82 sub %g3, STACK_BIAS + 192, %g3
83 sub %g7, STACK_BIAS, %g7
84 cmp %sp, %g7
85 blu,pn %xcc, 2f
86 cmp %sp, %g3
87 bleu,pn %xcc, 1f
88 nop
892: mov %g3, %sp
90 sethi %hi(panicstring), %g3
91 call prom_printf
92 or %g3, %lo(panicstring), %o0
93 call prom_halt
94 nop
951:
96#endif
97#ifdef CONFIG_FUNCTION_TRACER 23#ifdef CONFIG_FUNCTION_TRACER
98#ifdef CONFIG_DYNAMIC_FTRACE 24#ifdef CONFIG_DYNAMIC_FTRACE
99 mov %o7, %o0 25 /* Do nothing, the retl/nop below is all we need. */
100 .globl mcount_call
101mcount_call:
102 call ftrace_stub
103 mov %o0, %o7
104#else 26#else
105 sethi %hi(ftrace_trace_function), %g1 27 sethi %hi(function_trace_stop), %g1
28 lduw [%g1 + %lo(function_trace_stop)], %g2
29 brnz,pn %g2, 2f
30 sethi %hi(ftrace_trace_function), %g1
106 sethi %hi(ftrace_stub), %g2 31 sethi %hi(ftrace_stub), %g2
107 ldx [%g1 + %lo(ftrace_trace_function)], %g1 32 ldx [%g1 + %lo(ftrace_trace_function)], %g1
108 or %g2, %lo(ftrace_stub), %g2 33 or %g2, %lo(ftrace_stub), %g2
109 cmp %g1, %g2 34 cmp %g1, %g2
110 be,pn %icc, 1f 35 be,pn %icc, 1f
111 mov %i7, %o1 36 mov %i7, %g3
112 jmpl %g1, %g0 37 save %sp, -176, %sp
113 mov %o7, %o0 38 mov %g3, %o1
39 jmpl %g1, %o7
40 mov %i7, %o0
41 ret
42 restore
114 /* not reached */ 43 /* not reached */
1151: 441:
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46 sethi %hi(ftrace_graph_return), %g1
47 ldx [%g1 + %lo(ftrace_graph_return)], %g3
48 cmp %g2, %g3
49 bne,pn %xcc, 5f
50 sethi %hi(ftrace_graph_entry_stub), %g2
51 sethi %hi(ftrace_graph_entry), %g1
52 or %g2, %lo(ftrace_graph_entry_stub), %g2
53 ldx [%g1 + %lo(ftrace_graph_entry)], %g1
54 cmp %g1, %g2
55 be,pt %xcc, 2f
56 nop
575: mov %i7, %g2
58 mov %fp, %g3
59 save %sp, -176, %sp
60 mov %g2, %l0
61 ba,pt %xcc, ftrace_graph_caller
62 mov %g3, %l1
63#endif
642:
116#endif 65#endif
117#endif 66#endif
118 retl 67 retl
@@ -131,14 +80,50 @@ ftrace_stub:
131 .globl ftrace_caller 80 .globl ftrace_caller
132 .type ftrace_caller,#function 81 .type ftrace_caller,#function
133ftrace_caller: 82ftrace_caller:
134 mov %i7, %o1 83 sethi %hi(function_trace_stop), %g1
135 mov %o7, %o0 84 mov %i7, %g2
85 lduw [%g1 + %lo(function_trace_stop)], %g1
86 brnz,pn %g1, ftrace_stub
87 mov %fp, %g3
88 save %sp, -176, %sp
89 mov %g2, %o1
90 mov %g2, %l0
91 mov %g3, %l1
136 .globl ftrace_call 92 .globl ftrace_call
137ftrace_call: 93ftrace_call:
138 call ftrace_stub 94 call ftrace_stub
139 mov %o0, %o7 95 mov %i7, %o0
140 retl 96#ifdef CONFIG_FUNCTION_GRAPH_TRACER
97 .globl ftrace_graph_call
98ftrace_graph_call:
99 call ftrace_stub
141 nop 100 nop
101#endif
102 ret
103 restore
104#ifdef CONFIG_FUNCTION_GRAPH_TRACER
105 .size ftrace_graph_call,.-ftrace_graph_call
106#endif
107 .size ftrace_call,.-ftrace_call
142 .size ftrace_caller,.-ftrace_caller 108 .size ftrace_caller,.-ftrace_caller
143#endif 109#endif
144#endif 110#endif
111
112#ifdef CONFIG_FUNCTION_GRAPH_TRACER
113ENTRY(ftrace_graph_caller)
114 mov %l0, %o0
115 mov %i7, %o1
116 call prepare_ftrace_return
117 mov %l1, %o2
118 ret
119 restore %o0, -8, %i7
120END(ftrace_graph_caller)
121
122ENTRY(return_to_handler)
123 save %sp, -176, %sp
124 call ftrace_return_to_handler
125 mov %fp, %o0
126 jmpl %o0 + 8, %g0
127 restore
128END(return_to_handler)
129#endif
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 59b4556a5b92..e790bc1fbfa3 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -626,7 +626,7 @@ ia32_sys_call_table:
626 .quad stub32_sigreturn 626 .quad stub32_sigreturn
627 .quad stub32_clone /* 120 */ 627 .quad stub32_clone /* 120 */
628 .quad sys_setdomainname 628 .quad sys_setdomainname
629 .quad sys_uname 629 .quad sys_newuname
630 .quad sys_modify_ldt 630 .quad sys_modify_ldt
631 .quad compat_sys_adjtimex 631 .quad compat_sys_adjtimex
632 .quad sys32_mprotect /* 125 */ 632 .quad sys32_mprotect /* 125 */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..86a0ff0aeac7 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
21#define _ASM_X86_AMD_IOMMU_TYPES_H 21#define _ASM_X86_AMD_IOMMU_TYPES_H
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/mutex.h>
24#include <linux/list.h> 25#include <linux/list.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
26 27
@@ -140,6 +141,7 @@
140 141
141/* constants to configure the command buffer */ 142/* constants to configure the command buffer */
142#define CMD_BUFFER_SIZE 8192 143#define CMD_BUFFER_SIZE 8192
144#define CMD_BUFFER_UNINITIALIZED 1
143#define CMD_BUFFER_ENTRIES 512 145#define CMD_BUFFER_ENTRIES 512
144#define MMIO_CMD_SIZE_SHIFT 56 146#define MMIO_CMD_SIZE_SHIFT 56
145#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) 147#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
237 struct list_head list; /* for list of all protection domains */ 239 struct list_head list; /* for list of all protection domains */
238 struct list_head dev_list; /* List of all devices in this domain */ 240 struct list_head dev_list; /* List of all devices in this domain */
239 spinlock_t lock; /* mostly used to lock the page table*/ 241 spinlock_t lock; /* mostly used to lock the page table*/
242 struct mutex api_lock; /* protect page tables in the iommu-api path */
240 u16 id; /* the domain id written to the device table */ 243 u16 id; /* the domain id written to the device table */
241 int mode; /* paging mode (0-6 levels) */ 244 int mode; /* paging mode (0-6 levels) */
242 u64 *pt_root; /* page table root pointer */ 245 u64 *pt_root; /* page table root pointer */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8aa1a6..b60f2924c413 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -28,22 +28,39 @@
28 28
29#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
30#include <asm/hw_irq.h> 30#include <asm/hw_irq.h>
31#include <asm/kvm_para.h>
32 31
33/*G:030 32/*G:030
34 * But first, how does our Guest contact the Host to ask for privileged 33 * But first, how does our Guest contact the Host to ask for privileged
35 * operations? There are two ways: the direct way is to make a "hypercall", 34 * operations? There are two ways: the direct way is to make a "hypercall",
36 * to make requests of the Host Itself. 35 * to make requests of the Host Itself.
37 * 36 *
38 * We use the KVM hypercall mechanism, though completely different hypercall 37 * Our hypercall mechanism uses the highest unused trap code (traps 32 and
39 * numbers. Seventeen hypercalls are available: the hypercall number is put in 38 * above are used by real hardware interrupts). Seventeen hypercalls are
40 * the %eax register, and the arguments (when required) are placed in %ebx, 39 * available: the hypercall number is put in the %eax register, and the
41 * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. 40 * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
41 * If a return value makes sense, it's returned in %eax.
42 * 42 *
43 * Grossly invalid calls result in Sudden Death at the hands of the vengeful 43 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
44 * Host, rather than returning failure. This reflects Winston Churchill's 44 * Host, rather than returning failure. This reflects Winston Churchill's
45 * definition of a gentleman: "someone who is only rude intentionally". 45 * definition of a gentleman: "someone who is only rude intentionally".
46:*/ 46 */
47static inline unsigned long
48hcall(unsigned long call,
49 unsigned long arg1, unsigned long arg2, unsigned long arg3,
50 unsigned long arg4)
51{
52 /* "int" is the Intel instruction to trigger a trap. */
53 asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
54 /* The call in %eax (aka "a") might be overwritten */
55 : "=a"(call)
56 /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
57 : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
58 /* "memory" means this might write somewhere in memory.
59 * This isn't true for all calls, but it's safe to tell
60 * gcc that it might happen so it doesn't get clever. */
61 : "memory");
62 return call;
63}
47 64
48/* Can't use our min() macro here: needs to be a constant */ 65/* Can't use our min() macro here: needs to be a constant */
49#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) 66#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb571d9b..f854d89b7edf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
118 return false; 118 return false;
119 119
120 /* No device or no PCI device */ 120 /* No device or no PCI device */
121 if (!dev || dev->bus != &pci_bus_type) 121 if (dev->bus != &pci_bus_type)
122 return false; 122 return false;
123 123
124 devid = get_device_id(dev); 124 devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
392 u32 tail, head; 392 u32 tail, head;
393 u8 *target; 393 u8 *target;
394 394
395 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
395 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 396 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
396 target = iommu->cmd_buf + tail; 397 target = iommu->cmd_buf + tail;
397 memcpy_toio(target, cmd, sizeof(*cmd)); 398 memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
2186 struct dma_ops_domain *dma_dom; 2187 struct dma_ops_domain *dma_dom;
2187 u16 devid; 2188 u16 devid;
2188 2189
2189 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2190 for_each_pci_dev(dev) {
2190 2191
2191 /* Do we handle this device? */ 2192 /* Do we handle this device? */
2192 if (!check_device(&dev->dev)) 2193 if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
2298 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { 2299 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2299 struct device *dev = dev_data->dev; 2300 struct device *dev = dev_data->dev;
2300 2301
2301 do_detach(dev); 2302 __detach_device(dev);
2302 atomic_set(&dev_data->bind, 0); 2303 atomic_set(&dev_data->bind, 0);
2303 } 2304 }
2304 2305
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
2327 return NULL; 2328 return NULL;
2328 2329
2329 spin_lock_init(&domain->lock); 2330 spin_lock_init(&domain->lock);
2331 mutex_init(&domain->api_lock);
2330 domain->id = domain_id_alloc(); 2332 domain->id = domain_id_alloc();
2331 if (!domain->id) 2333 if (!domain->id)
2332 goto out_err; 2334 goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2379 2381
2380 free_pagetable(domain); 2382 free_pagetable(domain);
2381 2383
2382 domain_id_free(domain->id); 2384 protection_domain_free(domain);
2383
2384 kfree(domain);
2385 2385
2386 dom->priv = NULL; 2386 dom->priv = NULL;
2387} 2387}
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
2456 iova &= PAGE_MASK; 2456 iova &= PAGE_MASK;
2457 paddr &= PAGE_MASK; 2457 paddr &= PAGE_MASK;
2458 2458
2459 mutex_lock(&domain->api_lock);
2460
2459 for (i = 0; i < npages; ++i) { 2461 for (i = 0; i < npages; ++i) {
2460 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); 2462 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
2461 if (ret) 2463 if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
2465 paddr += PAGE_SIZE; 2467 paddr += PAGE_SIZE;
2466 } 2468 }
2467 2469
2470 mutex_unlock(&domain->api_lock);
2471
2468 return 0; 2472 return 0;
2469} 2473}
2470 2474
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
2477 2481
2478 iova &= PAGE_MASK; 2482 iova &= PAGE_MASK;
2479 2483
2484 mutex_lock(&domain->api_lock);
2485
2480 for (i = 0; i < npages; ++i) { 2486 for (i = 0; i < npages; ++i) {
2481 iommu_unmap_page(domain, iova, PM_MAP_4k); 2487 iommu_unmap_page(domain, iova, PM_MAP_4k);
2482 iova += PAGE_SIZE; 2488 iova += PAGE_SIZE;
2483 } 2489 }
2484 2490
2485 iommu_flush_tlb_pde(domain); 2491 iommu_flush_tlb_pde(domain);
2492
2493 mutex_unlock(&domain->api_lock);
2486} 2494}
2487 2495
2488static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 2496static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42f5350b908f..6360abf993d4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,9 +138,9 @@ int amd_iommus_present;
138bool amd_iommu_np_cache __read_mostly; 138bool amd_iommu_np_cache __read_mostly;
139 139
140/* 140/*
141 * Set to true if ACPI table parsing and hardware intialization went properly 141 * The ACPI table parsing functions set this variable on an error
142 */ 142 */
143static bool amd_iommu_initialized; 143static int __initdata amd_iommu_init_err;
144 144
145/* 145/*
146 * List of protection domains - used during resume 146 * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
391 */ 391 */
392 for (i = 0; i < table->length; ++i) 392 for (i = 0; i < table->length; ++i)
393 checksum += p[i]; 393 checksum += p[i];
394 if (checksum != 0) 394 if (checksum != 0) {
395 /* ACPI table corrupt */ 395 /* ACPI table corrupt */
396 return -ENODEV; 396 amd_iommu_init_err = -ENODEV;
397 return 0;
398 }
397 399
398 p += IVRS_HEADER_LENGTH; 400 p += IVRS_HEADER_LENGTH;
399 401
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
436 if (cmd_buf == NULL) 438 if (cmd_buf == NULL)
437 return NULL; 439 return NULL;
438 440
439 iommu->cmd_buf_size = CMD_BUFFER_SIZE; 441 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
440 442
441 return cmd_buf; 443 return cmd_buf;
442} 444}
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
472 &entry, sizeof(entry)); 474 &entry, sizeof(entry));
473 475
474 amd_iommu_reset_cmd_buffer(iommu); 476 amd_iommu_reset_cmd_buffer(iommu);
477 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
475} 478}
476 479
477static void __init free_command_buffer(struct amd_iommu *iommu) 480static void __init free_command_buffer(struct amd_iommu *iommu)
478{ 481{
479 free_pages((unsigned long)iommu->cmd_buf, 482 free_pages((unsigned long)iommu->cmd_buf,
480 get_order(iommu->cmd_buf_size)); 483 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
481} 484}
482 485
483/* allocates the memory where the IOMMU will log its events to */ 486/* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
920 h->mmio_phys); 923 h->mmio_phys);
921 924
922 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 925 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
923 if (iommu == NULL) 926 if (iommu == NULL) {
924 return -ENOMEM; 927 amd_iommu_init_err = -ENOMEM;
928 return 0;
929 }
930
925 ret = init_iommu_one(iommu, h); 931 ret = init_iommu_one(iommu, h);
926 if (ret) 932 if (ret) {
927 return ret; 933 amd_iommu_init_err = ret;
934 return 0;
935 }
928 break; 936 break;
929 default: 937 default:
930 break; 938 break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
934 } 942 }
935 WARN_ON(p != end); 943 WARN_ON(p != end);
936 944
937 amd_iommu_initialized = true;
938
939 return 0; 945 return 0;
940} 946}
941 947
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
1211 if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) 1217 if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
1212 return -ENODEV; 1218 return -ENODEV;
1213 1219
1220 ret = amd_iommu_init_err;
1221 if (ret)
1222 goto out;
1223
1214 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); 1224 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
1215 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); 1225 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
1216 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); 1226 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
1270 if (acpi_table_parse("IVRS", init_iommu_all) != 0) 1280 if (acpi_table_parse("IVRS", init_iommu_all) != 0)
1271 goto free; 1281 goto free;
1272 1282
1273 if (!amd_iommu_initialized) 1283 if (amd_iommu_init_err) {
1284 ret = amd_iommu_init_err;
1274 goto free; 1285 goto free;
1286 }
1275 1287
1276 if (acpi_table_parse("IVRS", init_memory_definitions) != 0) 1288 if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
1277 goto free; 1289 goto free;
1278 1290
1291 if (amd_iommu_init_err) {
1292 ret = amd_iommu_init_err;
1293 goto free;
1294 }
1295
1279 ret = sysdev_class_register(&amd_iommu_sysdev_class); 1296 ret = sysdev_class_register(&amd_iommu_sysdev_class);
1280 if (ret) 1297 if (ret)
1281 goto free; 1298 goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
1288 if (ret) 1305 if (ret)
1289 goto free; 1306 goto free;
1290 1307
1308 enable_iommus();
1309
1291 if (iommu_pass_through) 1310 if (iommu_pass_through)
1292 ret = amd_iommu_init_passthrough(); 1311 ret = amd_iommu_init_passthrough();
1293 else 1312 else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
1300 1319
1301 amd_iommu_init_notifier(); 1320 amd_iommu_init_notifier();
1302 1321
1303 enable_iommus();
1304
1305 if (iommu_pass_through) 1322 if (iommu_pass_through)
1306 goto out; 1323 goto out;
1307 1324
@@ -1315,6 +1332,7 @@ out:
1315 return ret; 1332 return ret;
1316 1333
1317free: 1334free:
1335 disable_iommus();
1318 1336
1319 amd_iommu_uninit_devices(); 1337 amd_iommu_uninit_devices();
1320 1338
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..b5d8b0bcf235 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
393 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { 393 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
394 int bus; 394 int bus;
395 int dev_base, dev_limit; 395 int dev_base, dev_limit;
396 u32 ctl;
396 397
397 bus = bus_dev_ranges[i].bus; 398 bus = bus_dev_ranges[i].bus;
398 dev_base = bus_dev_ranges[i].dev_base; 399 dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
406 gart_iommu_aperture = 1; 407 gart_iommu_aperture = 1;
407 x86_init.iommu.iommu_init = gart_iommu_init; 408 x86_init.iommu.iommu_init = gart_iommu_init;
408 409
409 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; 410 ctl = read_pci_config(bus, slot, 3,
411 AMD64_GARTAPERTURECTL);
412
413 /*
414 * Before we do anything else disable the GART. It may
415 * still be enabled if we boot into a crash-kernel here.
416 * Reconfiguring the GART while it is enabled could have
417 * unknown side-effects.
418 */
419 ctl &= ~GARTEN;
420 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
421
422 aper_order = (ctl >> 1) & 7;
410 aper_size = (32 * 1024 * 1024) << aper_order; 423 aper_size = (32 * 1024 * 1024) << aper_order;
411 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; 424 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
412 aper_base <<= 25; 425 aper_base <<= 25;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30#include <asm/x86_init.h>
31 30
32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 31#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
33 32
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
103#ifdef CONFIG_HPET_TIMER 102#ifdef CONFIG_HPET_TIMER
104 hpet_disable(); 103 hpet_disable();
105#endif 104#endif
106
107#ifdef CONFIG_X86_64
108 x86_platform.iommu_shutdown();
109#endif
110
111 crash_save_cpu(regs, safe_smp_processor_id()); 105 crash_save_cpu(regs, safe_smp_processor_id());
112} 106}
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index e39e77168a37..e1a93be4fd44 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,6 +14,8 @@
14#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) 14#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
15#endif 15#endif
16 16
17#include <linux/uaccess.h>
18
17extern void 19extern void
18show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 20show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
19 unsigned long *stack, unsigned long bp, char *log_lvl); 21 unsigned long *stack, unsigned long bp, char *log_lvl);
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n)
42 get_bp(frame); 44 get_bp(frame);
43 45
44#ifdef CONFIG_FRAME_POINTER 46#ifdef CONFIG_FRAME_POINTER
45 while (n--) 47 while (n--) {
46 frame = frame->next_frame; 48 if (probe_kernel_address(&frame->next_frame, frame))
49 break;
50 }
47#endif 51#endif
48 52
49 return (unsigned long)frame; 53 return (unsigned long)frame;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 68cd24f9deae..0f7f130caa67 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
565 565
566 enable_gart_translation(dev, __pa(agp_gatt_table)); 566 enable_gart_translation(dev, __pa(agp_gatt_table));
567 } 567 }
568
569 /* Flush the GART-TLB to remove stale entries */
570 k8_flush_garts();
568} 571}
569 572
570/* 573/*
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1d3fc2..2bdf628066bd 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
115 local_irq_save(flags); 115 local_irq_save(flags);
116 if (lguest_data.hcall_status[next_call] != 0xFF) { 116 if (lguest_data.hcall_status[next_call] != 0xFF) {
117 /* Table full, so do normal hcall which will flush table. */ 117 /* Table full, so do normal hcall which will flush table. */
118 kvm_hypercall4(call, arg1, arg2, arg3, arg4); 118 hcall(call, arg1, arg2, arg3, arg4);
119 } else { 119 } else {
120 lguest_data.hcalls[next_call].arg0 = call; 120 lguest_data.hcalls[next_call].arg0 = call;
121 lguest_data.hcalls[next_call].arg1 = arg1; 121 lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
145 * So, when we're in lazy mode, we call async_hcall() to store the call for 145 * So, when we're in lazy mode, we call async_hcall() to store the call for
146 * future processing: 146 * future processing:
147 */ 147 */
148static void lazy_hcall1(unsigned long call, 148static void lazy_hcall1(unsigned long call, unsigned long arg1)
149 unsigned long arg1)
150{ 149{
151 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 150 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
152 kvm_hypercall1(call, arg1); 151 hcall(call, arg1, 0, 0, 0);
153 else 152 else
154 async_hcall(call, arg1, 0, 0, 0); 153 async_hcall(call, arg1, 0, 0, 0);
155} 154}
156 155
157/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ 156/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
158static void lazy_hcall2(unsigned long call, 157static void lazy_hcall2(unsigned long call,
159 unsigned long arg1, 158 unsigned long arg1,
160 unsigned long arg2) 159 unsigned long arg2)
161{ 160{
162 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 161 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
163 kvm_hypercall2(call, arg1, arg2); 162 hcall(call, arg1, arg2, 0, 0);
164 else 163 else
165 async_hcall(call, arg1, arg2, 0, 0); 164 async_hcall(call, arg1, arg2, 0, 0);
166} 165}
167 166
168static void lazy_hcall3(unsigned long call, 167static void lazy_hcall3(unsigned long call,
169 unsigned long arg1, 168 unsigned long arg1,
170 unsigned long arg2, 169 unsigned long arg2,
171 unsigned long arg3) 170 unsigned long arg3)
172{ 171{
173 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 172 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
174 kvm_hypercall3(call, arg1, arg2, arg3); 173 hcall(call, arg1, arg2, arg3, 0);
175 else 174 else
176 async_hcall(call, arg1, arg2, arg3, 0); 175 async_hcall(call, arg1, arg2, arg3, 0);
177} 176}
178 177
179#ifdef CONFIG_X86_PAE 178#ifdef CONFIG_X86_PAE
180static void lazy_hcall4(unsigned long call, 179static void lazy_hcall4(unsigned long call,
181 unsigned long arg1, 180 unsigned long arg1,
182 unsigned long arg2, 181 unsigned long arg2,
183 unsigned long arg3, 182 unsigned long arg3,
184 unsigned long arg4) 183 unsigned long arg4)
185{ 184{
186 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 185 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
187 kvm_hypercall4(call, arg1, arg2, arg3, arg4); 186 hcall(call, arg1, arg2, arg3, arg4);
188 else 187 else
189 async_hcall(call, arg1, arg2, arg3, arg4); 188 async_hcall(call, arg1, arg2, arg3, arg4);
190} 189}
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
196:*/ 195:*/
197static void lguest_leave_lazy_mmu_mode(void) 196static void lguest_leave_lazy_mmu_mode(void)
198{ 197{
199 kvm_hypercall0(LHCALL_FLUSH_ASYNC); 198 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
200 paravirt_leave_lazy_mmu(); 199 paravirt_leave_lazy_mmu();
201} 200}
202 201
203static void lguest_end_context_switch(struct task_struct *next) 202static void lguest_end_context_switch(struct task_struct *next)
204{ 203{
205 kvm_hypercall0(LHCALL_FLUSH_ASYNC); 204 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
206 paravirt_end_context_switch(next); 205 paravirt_end_context_switch(next);
207} 206}
208 207
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
286 /* Keep the local copy up to date. */ 285 /* Keep the local copy up to date. */
287 native_write_idt_entry(dt, entrynum, g); 286 native_write_idt_entry(dt, entrynum, g);
288 /* Tell Host about this new entry. */ 287 /* Tell Host about this new entry. */
289 kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); 288 hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
290} 289}
291 290
292/* 291/*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
300 struct desc_struct *idt = (void *)desc->address; 299 struct desc_struct *idt = (void *)desc->address;
301 300
302 for (i = 0; i < (desc->size+1)/8; i++) 301 for (i = 0; i < (desc->size+1)/8; i++)
303 kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); 302 hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
304} 303}
305 304
306/* 305/*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
321 struct desc_struct *gdt = (void *)desc->address; 320 struct desc_struct *gdt = (void *)desc->address;
322 321
323 for (i = 0; i < (desc->size+1)/8; i++) 322 for (i = 0; i < (desc->size+1)/8; i++)
324 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); 323 hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
325} 324}
326 325
327/* 326/*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
334{ 333{
335 native_write_gdt_entry(dt, entrynum, desc, type); 334 native_write_gdt_entry(dt, entrynum, desc, type);
336 /* Tell Host about this new entry. */ 335 /* Tell Host about this new entry. */
337 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, 336 hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
338 dt[entrynum].a, dt[entrynum].b); 337 dt[entrynum].a, dt[entrynum].b, 0);
339} 338}
340 339
341/* 340/*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
931 } 930 }
932 931
933 /* Please wake us this far in the future. */ 932 /* Please wake us this far in the future. */
934 kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); 933 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
935 return 0; 934 return 0;
936} 935}
937 936
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
942 case CLOCK_EVT_MODE_UNUSED: 941 case CLOCK_EVT_MODE_UNUSED:
943 case CLOCK_EVT_MODE_SHUTDOWN: 942 case CLOCK_EVT_MODE_SHUTDOWN:
944 /* A 0 argument shuts the clock down. */ 943 /* A 0 argument shuts the clock down. */
945 kvm_hypercall0(LHCALL_SET_CLOCKEVENT); 944 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
946 break; 945 break;
947 case CLOCK_EVT_MODE_ONESHOT: 946 case CLOCK_EVT_MODE_ONESHOT:
948 /* This is what we expect. */ 947 /* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
1100/* STOP! Until an interrupt comes in. */ 1099/* STOP! Until an interrupt comes in. */
1101static void lguest_safe_halt(void) 1100static void lguest_safe_halt(void)
1102{ 1101{
1103 kvm_hypercall0(LHCALL_HALT); 1102 hcall(LHCALL_HALT, 0, 0, 0, 0);
1104} 1103}
1105 1104
1106/* 1105/*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
1112 */ 1111 */
1113static void lguest_power_off(void) 1112static void lguest_power_off(void)
1114{ 1113{
1115 kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), 1114 hcall(LHCALL_SHUTDOWN, __pa("Power down"),
1116 LGUEST_SHUTDOWN_POWEROFF); 1115 LGUEST_SHUTDOWN_POWEROFF, 0, 0);
1117} 1116}
1118 1117
1119/* 1118/*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
1123 */ 1122 */
1124static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) 1123static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
1125{ 1124{
1126 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); 1125 hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
1127 /* The hcall won't return, but to keep gcc happy, we're "done". */ 1126 /* The hcall won't return, but to keep gcc happy, we're "done". */
1128 return NOTIFY_DONE; 1127 return NOTIFY_DONE;
1129} 1128}
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
1162 len = sizeof(scratch) - 1; 1161 len = sizeof(scratch) - 1;
1163 scratch[len] = '\0'; 1162 scratch[len] = '\0';
1164 memcpy(scratch, buf, len); 1163 memcpy(scratch, buf, len);
1165 kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); 1164 hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
1166 1165
1167 /* This routine returns the number of bytes actually written. */ 1166 /* This routine returns the number of bytes actually written. */
1168 return len; 1167 return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
1174 */ 1173 */
1175static void lguest_restart(char *reason) 1174static void lguest_restart(char *reason)
1176{ 1175{
1177 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); 1176 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
1178} 1177}
1179 1178
1180/*G:050 1179/*G:050
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 27eac0faee48..4f420c2f2d55 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
32 */ 32 */
33 movl $LHCALL_LGUEST_INIT, %eax 33 movl $LHCALL_LGUEST_INIT, %eax
34 movl $lguest_data - __PAGE_OFFSET, %ebx 34 movl $lguest_data - __PAGE_OFFSET, %ebx
35 .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ 35 int $LGUEST_TRAP_ENTRY
36 36
37 /* Set up the initial stack so we can run C code. */ 37 /* Set up the initial stack so we can run C code. */
38 movl $(init_thread_union+THREAD_SIZE),%esp 38 movl $(init_thread_union+THREAD_SIZE),%esp
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index a610ebe18edd..2fbfe51fb141 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
471 /* allow full data read from EC address space */ 471 /* allow full data read from EC address space */
472 if (obj_desc->field.region_obj->region.space_id == 472 if (obj_desc->field.region_obj->region.space_id ==
473 ACPI_ADR_SPACE_EC) { 473 ACPI_ADR_SPACE_EC) {
474 if (obj_desc->common_field.bit_length > 8) 474 if (obj_desc->common_field.bit_length > 8) {
475 obj_desc->common_field.access_bit_width = 475 unsigned width =
476 ACPI_ROUND_UP(obj_desc->common_field. 476 ACPI_ROUND_BITS_UP_TO_BYTES(
477 bit_length, 8); 477 obj_desc->common_field.bit_length);
478 // access_bit_width is u8, don't overflow it
479 if (width > 8)
480 width = 8;
478 obj_desc->common_field.access_byte_width = 481 obj_desc->common_field.access_byte_width =
479 ACPI_DIV_8(obj_desc->common_field. 482 width;
480 access_bit_width); 483 obj_desc->common_field.access_bit_width =
484 8 * width;
485 }
481 } 486 }
482 487
483 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, 488 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b86712167eb8..b9101818b47b 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
68 *(struct atm_vcc **) &new_msg->vcc = vcc; 68 *(struct atm_vcc **) &new_msg->vcc = vcc;
69 old_test = test_bit(flag,&vcc->flags); 69 old_test = test_bit(flag,&vcc->flags);
70 out_vcc->push(out_vcc,skb); 70 out_vcc->push(out_vcc,skb);
71 add_wait_queue(sk_atm(vcc)->sk_sleep, &wait); 71 add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
72 while (test_bit(flag,&vcc->flags) == old_test) { 72 while (test_bit(flag,&vcc->flags) == old_test) {
73 mb(); 73 mb();
74 out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL; 74 out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
80 schedule(); 80 schedule();
81 } 81 }
82 set_current_state(TASK_RUNNING); 82 set_current_state(TASK_RUNNING);
83 remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait); 83 remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
84 return error; 84 return error;
85} 85}
86 86
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
105 msg->type); 105 msg->type);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 wake_up(sk_atm(vcc)->sk_sleep); 108 wake_up(sk_sleep(sk_atm(vcc)));
109 return 0; 109 return 0;
110} 110}
111 111
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d41331bc2aa7..aa4248efc5d8 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void)
1817 pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); 1817 pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
1818 /* clear any possible error conditions */ 1818 /* clear any possible error conditions */
1819 pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); 1819 pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
1820
1821 intel_i830_setup_flush();
1822 return 0; 1820 return 0;
1823} 1821}
1824 1822
@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = {
2188 .agp_destroy_page = agp_generic_destroy_page, 2186 .agp_destroy_page = agp_generic_destroy_page,
2189 .agp_destroy_pages = agp_generic_destroy_pages, 2187 .agp_destroy_pages = agp_generic_destroy_pages,
2190 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 2188 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2191 .chipset_flush = intel_i830_chipset_flush,
2192}; 2189};
2193 2190
2194static const struct agp_bridge_driver intel_850_driver = { 2191static const struct agp_bridge_driver intel_850_driver = {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c9bc896d68af..90b199f97bec 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
1026 1026
1027 xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ 1027 xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
1028 /* last check before exit */ 1028 /* last check before exit */
1029 if (!io_detect_cm4000(iobase, dev)) 1029 if (!io_detect_cm4000(iobase, dev)) {
1030 count = -ENODEV; 1030 rc = -ENODEV;
1031 goto release_io;
1032 }
1031 1033
1032 if (test_bit(IS_INVREV, &dev->flags) && count > 0) 1034 if (test_bit(IS_INVREV, &dev->flags) && count > 0)
1033 str_invert_revert(dev->rbuf, count); 1035 str_invert_revert(dev->rbuf, count);
1034 1036
1035 if (copy_to_user(buf, dev->rbuf, count)) 1037 if (copy_to_user(buf, dev->rbuf, count))
1036 return -EFAULT; 1038 rc = -EFAULT;
1037 1039
1038release_io: 1040release_io:
1039 clear_bit(LOCK_IO, &dev->flags); 1041 clear_bit(LOCK_IO, &dev->flags);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 702dcc98c074..14a34d99eea2 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
960 u.packet.header_length = GET_HEADER_LENGTH(control); 960 u.packet.header_length = GET_HEADER_LENGTH(control);
961 961
962 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { 962 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
963 if (u.packet.header_length % 4 != 0)
964 return -EINVAL;
963 header_length = u.packet.header_length; 965 header_length = u.packet.header_length;
964 } else { 966 } else {
965 /* 967 /*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
969 if (ctx->header_size == 0) { 971 if (ctx->header_size == 0) {
970 if (u.packet.header_length > 0) 972 if (u.packet.header_length > 0)
971 return -EINVAL; 973 return -EINVAL;
972 } else if (u.packet.header_length % ctx->header_size != 0) { 974 } else if (u.packet.header_length == 0 ||
975 u.packet.header_length % ctx->header_size != 0) {
973 return -EINVAL; 976 return -EINVAL;
974 } 977 }
975 header_length = 0; 978 header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
1354 return -ENODEV; 1357 return -ENODEV;
1355 1358
1356 if (_IOC_TYPE(cmd) != '#' || 1359 if (_IOC_TYPE(cmd) != '#' ||
1357 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1360 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1361 _IOC_SIZE(cmd) > sizeof(buffer))
1358 return -EINVAL; 1362 return -EINVAL;
1359 1363
1360 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1364 if (_IOC_DIR(cmd) == _IOC_READ)
1361 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1365 memset(&buffer, 0, _IOC_SIZE(cmd));
1362 copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) 1366
1367 if (_IOC_DIR(cmd) & _IOC_WRITE)
1368 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1363 return -EFAULT; 1369 return -EFAULT;
1364 }
1365 1370
1366 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); 1371 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1367 if (ret < 0) 1372 if (ret < 0)
1368 return ret; 1373 return ret;
1369 1374
1370 if (_IOC_DIR(cmd) & _IOC_READ) { 1375 if (_IOC_DIR(cmd) & _IOC_READ)
1371 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1376 if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1372 copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1373 return -EFAULT; 1377 return -EFAULT;
1374 }
1375 1378
1376 return ret; 1379 return ret;
1377} 1380}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b743411d8144..a0c365f2e521 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
516 } 516 }
517 driver = dev->driver; 517 driver = dev->driver;
518 518
519 drm_vblank_cleanup(dev);
520
521 drm_lastclose(dev); 519 drm_lastclose(dev);
522 520
523 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 521 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
537 dev->agp = NULL; 535 dev->agp = NULL;
538 } 536 }
539 537
538 drm_vblank_cleanup(dev);
539
540 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 540 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
541 drm_rmmap(dev, r_list->map); 541 drm_rmmap(dev, r_list->map);
542 drm_ht_remove(&dev->map_hash); 542 drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b574503dddd0..a0b8447b06e7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
226 } else { 226 } else {
227 struct drm_i915_gem_object *obj_priv; 227 struct drm_i915_gem_object *obj_priv;
228 228
229 obj_priv = obj->driver_private; 229 obj_priv = to_intel_bo(obj);
230 seq_printf(m, "Fenced object[%2d] = %p: %s " 230 seq_printf(m, "Fenced object[%2d] = %p: %s "
231 "%08x %08zx %08x %s %08x %08x %d", 231 "%08x %08zx %08x %s %08x %08x %d",
232 i, obj, get_pin_flag(obj_priv), 232 i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919abdb2..0af3dcc85ce9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -80,14 +80,14 @@ const static struct intel_device_info intel_i915g_info = {
80 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 80 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
81}; 81};
82const static struct intel_device_info intel_i915gm_info = { 82const static struct intel_device_info intel_i915gm_info = {
83 .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, 83 .is_i9xx = 1, .is_mobile = 1,
84 .cursor_needs_physical = 1, 84 .cursor_needs_physical = 1,
85}; 85};
86const static struct intel_device_info intel_i945g_info = { 86const static struct intel_device_info intel_i945g_info = {
87 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 87 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
88}; 88};
89const static struct intel_device_info intel_i945gm_info = { 89const static struct intel_device_info intel_i945gm_info = {
90 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, 90 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
91 .has_hotplug = 1, .cursor_needs_physical = 1, 91 .has_hotplug = 1, .cursor_needs_physical = 1,
92}; 92};
93 93
@@ -361,7 +361,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
361 !dev_priv->mm.suspended) { 361 !dev_priv->mm.suspended) {
362 drm_i915_ring_buffer_t *ring = &dev_priv->ring; 362 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
363 struct drm_gem_object *obj = ring->ring_obj; 363 struct drm_gem_object *obj = ring->ring_obj;
364 struct drm_i915_gem_object *obj_priv = obj->driver_private; 364 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
365 dev_priv->mm.suspended = 0; 365 dev_priv->mm.suspended = 0;
366 366
367 /* Stop the ring if it's running. */ 367 /* Stop the ring if it's running. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260fbc5e..6960849522f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -611,6 +611,8 @@ typedef struct drm_i915_private {
611 /* Reclocking support */ 611 /* Reclocking support */
612 bool render_reclock_avail; 612 bool render_reclock_avail;
613 bool lvds_downclock_avail; 613 bool lvds_downclock_avail;
614 /* indicate whether the LVDS EDID is OK */
615 bool lvds_edid_good;
614 /* indicates the reduced downclock for LVDS*/ 616 /* indicates the reduced downclock for LVDS*/
615 int lvds_downclock; 617 int lvds_downclock;
616 struct work_struct idle_work; 618 struct work_struct idle_work;
@@ -731,6 +733,8 @@ struct drm_i915_gem_object {
731 atomic_t pending_flip; 733 atomic_t pending_flip;
732}; 734};
733 735
736#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
737
734/** 738/**
735 * Request queue structure. 739 * Request queue structure.
736 * 740 *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 368d726853d1..80871c62a571 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages,
163static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) 163static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
164{ 164{
165 drm_i915_private_t *dev_priv = obj->dev->dev_private; 165 drm_i915_private_t *dev_priv = obj->dev->dev_private;
166 struct drm_i915_gem_object *obj_priv = obj->driver_private; 166 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
167 167
168 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 168 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
169 obj_priv->tiling_mode != I915_TILING_NONE; 169 obj_priv->tiling_mode != I915_TILING_NONE;
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
264 struct drm_i915_gem_pread *args, 264 struct drm_i915_gem_pread *args,
265 struct drm_file *file_priv) 265 struct drm_file *file_priv)
266{ 266{
267 struct drm_i915_gem_object *obj_priv = obj->driver_private; 267 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
268 ssize_t remain; 268 ssize_t remain;
269 loff_t offset, page_base; 269 loff_t offset, page_base;
270 char __user *user_data; 270 char __user *user_data;
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
285 if (ret != 0) 285 if (ret != 0)
286 goto fail_put_pages; 286 goto fail_put_pages;
287 287
288 obj_priv = obj->driver_private; 288 obj_priv = to_intel_bo(obj);
289 offset = args->offset; 289 offset = args->offset;
290 290
291 while (remain > 0) { 291 while (remain > 0) {
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
354 struct drm_i915_gem_pread *args, 354 struct drm_i915_gem_pread *args,
355 struct drm_file *file_priv) 355 struct drm_file *file_priv)
356{ 356{
357 struct drm_i915_gem_object *obj_priv = obj->driver_private; 357 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
358 struct mm_struct *mm = current->mm; 358 struct mm_struct *mm = current->mm;
359 struct page **user_pages; 359 struct page **user_pages;
360 ssize_t remain; 360 ssize_t remain;
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
403 if (ret != 0) 403 if (ret != 0)
404 goto fail_put_pages; 404 goto fail_put_pages;
405 405
406 obj_priv = obj->driver_private; 406 obj_priv = to_intel_bo(obj);
407 offset = args->offset; 407 offset = args->offset;
408 408
409 while (remain > 0) { 409 while (remain > 0) {
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
479 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 479 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
480 if (obj == NULL) 480 if (obj == NULL)
481 return -EBADF; 481 return -EBADF;
482 obj_priv = obj->driver_private; 482 obj_priv = to_intel_bo(obj);
483 483
484 /* Bounds check source. 484 /* Bounds check source.
485 * 485 *
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
581 struct drm_i915_gem_pwrite *args, 581 struct drm_i915_gem_pwrite *args,
582 struct drm_file *file_priv) 582 struct drm_file *file_priv)
583{ 583{
584 struct drm_i915_gem_object *obj_priv = obj->driver_private; 584 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
585 drm_i915_private_t *dev_priv = dev->dev_private; 585 drm_i915_private_t *dev_priv = dev->dev_private;
586 ssize_t remain; 586 ssize_t remain;
587 loff_t offset, page_base; 587 loff_t offset, page_base;
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
605 if (ret) 605 if (ret)
606 goto fail; 606 goto fail;
607 607
608 obj_priv = obj->driver_private; 608 obj_priv = to_intel_bo(obj);
609 offset = obj_priv->gtt_offset + args->offset; 609 offset = obj_priv->gtt_offset + args->offset;
610 610
611 while (remain > 0) { 611 while (remain > 0) {
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
655 struct drm_i915_gem_pwrite *args, 655 struct drm_i915_gem_pwrite *args,
656 struct drm_file *file_priv) 656 struct drm_file *file_priv)
657{ 657{
658 struct drm_i915_gem_object *obj_priv = obj->driver_private; 658 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
659 drm_i915_private_t *dev_priv = dev->dev_private; 659 drm_i915_private_t *dev_priv = dev->dev_private;
660 ssize_t remain; 660 ssize_t remain;
661 loff_t gtt_page_base, offset; 661 loff_t gtt_page_base, offset;
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
699 if (ret) 699 if (ret)
700 goto out_unpin_object; 700 goto out_unpin_object;
701 701
702 obj_priv = obj->driver_private; 702 obj_priv = to_intel_bo(obj);
703 offset = obj_priv->gtt_offset + args->offset; 703 offset = obj_priv->gtt_offset + args->offset;
704 704
705 while (remain > 0) { 705 while (remain > 0) {
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
761 struct drm_i915_gem_pwrite *args, 761 struct drm_i915_gem_pwrite *args,
762 struct drm_file *file_priv) 762 struct drm_file *file_priv)
763{ 763{
764 struct drm_i915_gem_object *obj_priv = obj->driver_private; 764 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
765 ssize_t remain; 765 ssize_t remain;
766 loff_t offset, page_base; 766 loff_t offset, page_base;
767 char __user *user_data; 767 char __user *user_data;
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
781 if (ret != 0) 781 if (ret != 0)
782 goto fail_put_pages; 782 goto fail_put_pages;
783 783
784 obj_priv = obj->driver_private; 784 obj_priv = to_intel_bo(obj);
785 offset = args->offset; 785 offset = args->offset;
786 obj_priv->dirty = 1; 786 obj_priv->dirty = 1;
787 787
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
829 struct drm_i915_gem_pwrite *args, 829 struct drm_i915_gem_pwrite *args,
830 struct drm_file *file_priv) 830 struct drm_file *file_priv)
831{ 831{
832 struct drm_i915_gem_object *obj_priv = obj->driver_private; 832 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
833 struct mm_struct *mm = current->mm; 833 struct mm_struct *mm = current->mm;
834 struct page **user_pages; 834 struct page **user_pages;
835 ssize_t remain; 835 ssize_t remain;
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
877 if (ret != 0) 877 if (ret != 0)
878 goto fail_put_pages; 878 goto fail_put_pages;
879 879
880 obj_priv = obj->driver_private; 880 obj_priv = to_intel_bo(obj);
881 offset = args->offset; 881 offset = args->offset;
882 obj_priv->dirty = 1; 882 obj_priv->dirty = 1;
883 883
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
952 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 952 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
953 if (obj == NULL) 953 if (obj == NULL)
954 return -EBADF; 954 return -EBADF;
955 obj_priv = obj->driver_private; 955 obj_priv = to_intel_bo(obj);
956 956
957 /* Bounds check destination. 957 /* Bounds check destination.
958 * 958 *
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1034 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1034 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1035 if (obj == NULL) 1035 if (obj == NULL)
1036 return -EBADF; 1036 return -EBADF;
1037 obj_priv = obj->driver_private; 1037 obj_priv = to_intel_bo(obj);
1038 1038
1039 mutex_lock(&dev->struct_mutex); 1039 mutex_lock(&dev->struct_mutex);
1040 1040
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n", 1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1097 __func__, args->handle, obj, obj->size); 1097 __func__, args->handle, obj, obj->size);
1098#endif 1098#endif
1099 obj_priv = obj->driver_private; 1099 obj_priv = to_intel_bo(obj);
1100 1100
1101 /* Pinned buffers may be scanout, so flush the cache */ 1101 /* Pinned buffers may be scanout, so flush the cache */
1102 if (obj_priv->pin_count) 1102 if (obj_priv->pin_count)
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1167 struct drm_gem_object *obj = vma->vm_private_data; 1167 struct drm_gem_object *obj = vma->vm_private_data;
1168 struct drm_device *dev = obj->dev; 1168 struct drm_device *dev = obj->dev;
1169 struct drm_i915_private *dev_priv = dev->dev_private; 1169 struct drm_i915_private *dev_priv = dev->dev_private;
1170 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1170 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1171 pgoff_t page_offset; 1171 pgoff_t page_offset;
1172 unsigned long pfn; 1172 unsigned long pfn;
1173 int ret = 0; 1173 int ret = 0;
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1234{ 1234{
1235 struct drm_device *dev = obj->dev; 1235 struct drm_device *dev = obj->dev;
1236 struct drm_gem_mm *mm = dev->mm_private; 1236 struct drm_gem_mm *mm = dev->mm_private;
1237 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1237 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1238 struct drm_map_list *list; 1238 struct drm_map_list *list;
1239 struct drm_local_map *map; 1239 struct drm_local_map *map;
1240 int ret = 0; 1240 int ret = 0;
@@ -1305,7 +1305,7 @@ void
1305i915_gem_release_mmap(struct drm_gem_object *obj) 1305i915_gem_release_mmap(struct drm_gem_object *obj)
1306{ 1306{
1307 struct drm_device *dev = obj->dev; 1307 struct drm_device *dev = obj->dev;
1308 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1308 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1309 1309
1310 if (dev->dev_mapping) 1310 if (dev->dev_mapping)
1311 unmap_mapping_range(dev->dev_mapping, 1311 unmap_mapping_range(dev->dev_mapping,
@@ -1316,7 +1316,7 @@ static void
1316i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1316i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1317{ 1317{
1318 struct drm_device *dev = obj->dev; 1318 struct drm_device *dev = obj->dev;
1319 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1319 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1320 struct drm_gem_mm *mm = dev->mm_private; 1320 struct drm_gem_mm *mm = dev->mm_private;
1321 struct drm_map_list *list; 1321 struct drm_map_list *list;
1322 1322
@@ -1347,7 +1347,7 @@ static uint32_t
1347i915_gem_get_gtt_alignment(struct drm_gem_object *obj) 1347i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1348{ 1348{
1349 struct drm_device *dev = obj->dev; 1349 struct drm_device *dev = obj->dev;
1350 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1350 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1351 int start, i; 1351 int start, i;
1352 1352
1353 /* 1353 /*
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1406 1406
1407 mutex_lock(&dev->struct_mutex); 1407 mutex_lock(&dev->struct_mutex);
1408 1408
1409 obj_priv = obj->driver_private; 1409 obj_priv = to_intel_bo(obj);
1410 1410
1411 if (obj_priv->madv != I915_MADV_WILLNEED) { 1411 if (obj_priv->madv != I915_MADV_WILLNEED) {
1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1450void 1450void
1451i915_gem_object_put_pages(struct drm_gem_object *obj) 1451i915_gem_object_put_pages(struct drm_gem_object *obj)
1452{ 1452{
1453 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1453 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1454 int page_count = obj->size / PAGE_SIZE; 1454 int page_count = obj->size / PAGE_SIZE;
1455 int i; 1455 int i;
1456 1456
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1486{ 1486{
1487 struct drm_device *dev = obj->dev; 1487 struct drm_device *dev = obj->dev;
1488 drm_i915_private_t *dev_priv = dev->dev_private; 1488 drm_i915_private_t *dev_priv = dev->dev_private;
1489 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1490 1490
1491 /* Add a reference if we're newly entering the active list. */ 1491 /* Add a reference if we're newly entering the active list. */
1492 if (!obj_priv->active) { 1492 if (!obj_priv->active) {
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1506{ 1506{
1507 struct drm_device *dev = obj->dev; 1507 struct drm_device *dev = obj->dev;
1508 drm_i915_private_t *dev_priv = dev->dev_private; 1508 drm_i915_private_t *dev_priv = dev->dev_private;
1509 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1510 1510
1511 BUG_ON(!obj_priv->active); 1511 BUG_ON(!obj_priv->active);
1512 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 1512 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1517static void 1517static void
1518i915_gem_object_truncate(struct drm_gem_object *obj) 1518i915_gem_object_truncate(struct drm_gem_object *obj)
1519{ 1519{
1520 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1520 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1521 struct inode *inode; 1521 struct inode *inode;
1522 1522
1523 inode = obj->filp->f_path.dentry->d_inode; 1523 inode = obj->filp->f_path.dentry->d_inode;
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1538{ 1538{
1539 struct drm_device *dev = obj->dev; 1539 struct drm_device *dev = obj->dev;
1540 drm_i915_private_t *dev_priv = dev->dev_private; 1540 drm_i915_private_t *dev_priv = dev->dev_private;
1541 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1541 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1542 1542
1543 i915_verify_inactive(dev, __FILE__, __LINE__); 1543 i915_verify_inactive(dev, __FILE__, __LINE__);
1544 if (obj_priv->pin_count != 0) 1544 if (obj_priv->pin_count != 0)
@@ -1965,7 +1965,7 @@ static int
1965i915_gem_object_wait_rendering(struct drm_gem_object *obj) 1965i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1966{ 1966{
1967 struct drm_device *dev = obj->dev; 1967 struct drm_device *dev = obj->dev;
1968 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1968 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1969 int ret; 1969 int ret;
1970 1970
1971 /* This function only exists to support waiting for existing rendering, 1971 /* This function only exists to support waiting for existing rendering,
@@ -1997,7 +1997,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1997{ 1997{
1998 struct drm_device *dev = obj->dev; 1998 struct drm_device *dev = obj->dev;
1999 drm_i915_private_t *dev_priv = dev->dev_private; 1999 drm_i915_private_t *dev_priv = dev->dev_private;
2000 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2000 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2001 int ret = 0; 2001 int ret = 0;
2002 2002
2003#if WATCH_BUF 2003#if WATCH_BUF
@@ -2173,7 +2173,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2173#if WATCH_LRU 2173#if WATCH_LRU
2174 DRM_INFO("%s: evicting %p\n", __func__, obj); 2174 DRM_INFO("%s: evicting %p\n", __func__, obj);
2175#endif 2175#endif
2176 obj_priv = obj->driver_private; 2176 obj_priv = to_intel_bo(obj);
2177 BUG_ON(obj_priv->pin_count != 0); 2177 BUG_ON(obj_priv->pin_count != 0);
2178 BUG_ON(obj_priv->active); 2178 BUG_ON(obj_priv->active);
2179 2179
@@ -2244,7 +2244,7 @@ int
2244i915_gem_object_get_pages(struct drm_gem_object *obj, 2244i915_gem_object_get_pages(struct drm_gem_object *obj,
2245 gfp_t gfpmask) 2245 gfp_t gfpmask)
2246{ 2246{
2247 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2247 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2248 int page_count, i; 2248 int page_count, i;
2249 struct address_space *mapping; 2249 struct address_space *mapping;
2250 struct inode *inode; 2250 struct inode *inode;
@@ -2297,7 +2297,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2297 struct drm_gem_object *obj = reg->obj; 2297 struct drm_gem_object *obj = reg->obj;
2298 struct drm_device *dev = obj->dev; 2298 struct drm_device *dev = obj->dev;
2299 drm_i915_private_t *dev_priv = dev->dev_private; 2299 drm_i915_private_t *dev_priv = dev->dev_private;
2300 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2300 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2301 int regnum = obj_priv->fence_reg; 2301 int regnum = obj_priv->fence_reg;
2302 uint64_t val; 2302 uint64_t val;
2303 2303
@@ -2319,7 +2319,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2319 struct drm_gem_object *obj = reg->obj; 2319 struct drm_gem_object *obj = reg->obj;
2320 struct drm_device *dev = obj->dev; 2320 struct drm_device *dev = obj->dev;
2321 drm_i915_private_t *dev_priv = dev->dev_private; 2321 drm_i915_private_t *dev_priv = dev->dev_private;
2322 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2322 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2323 int regnum = obj_priv->fence_reg; 2323 int regnum = obj_priv->fence_reg;
2324 uint64_t val; 2324 uint64_t val;
2325 2325
@@ -2339,7 +2339,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2339 struct drm_gem_object *obj = reg->obj; 2339 struct drm_gem_object *obj = reg->obj;
2340 struct drm_device *dev = obj->dev; 2340 struct drm_device *dev = obj->dev;
2341 drm_i915_private_t *dev_priv = dev->dev_private; 2341 drm_i915_private_t *dev_priv = dev->dev_private;
2342 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2342 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2343 int regnum = obj_priv->fence_reg; 2343 int regnum = obj_priv->fence_reg;
2344 int tile_width; 2344 int tile_width;
2345 uint32_t fence_reg, val; 2345 uint32_t fence_reg, val;
@@ -2381,7 +2381,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2381 struct drm_gem_object *obj = reg->obj; 2381 struct drm_gem_object *obj = reg->obj;
2382 struct drm_device *dev = obj->dev; 2382 struct drm_device *dev = obj->dev;
2383 drm_i915_private_t *dev_priv = dev->dev_private; 2383 drm_i915_private_t *dev_priv = dev->dev_private;
2384 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2384 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2385 int regnum = obj_priv->fence_reg; 2385 int regnum = obj_priv->fence_reg;
2386 uint32_t val; 2386 uint32_t val;
2387 uint32_t pitch_val; 2387 uint32_t pitch_val;
@@ -2425,7 +2425,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
2425 if (!reg->obj) 2425 if (!reg->obj)
2426 return i; 2426 return i;
2427 2427
2428 obj_priv = reg->obj->driver_private; 2428 obj_priv = to_intel_bo(reg->obj);
2429 if (!obj_priv->pin_count) 2429 if (!obj_priv->pin_count)
2430 avail++; 2430 avail++;
2431 } 2431 }
@@ -2480,7 +2480,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2480{ 2480{
2481 struct drm_device *dev = obj->dev; 2481 struct drm_device *dev = obj->dev;
2482 struct drm_i915_private *dev_priv = dev->dev_private; 2482 struct drm_i915_private *dev_priv = dev->dev_private;
2483 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2483 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2484 struct drm_i915_fence_reg *reg = NULL; 2484 struct drm_i915_fence_reg *reg = NULL;
2485 int ret; 2485 int ret;
2486 2486
@@ -2547,7 +2547,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2547{ 2547{
2548 struct drm_device *dev = obj->dev; 2548 struct drm_device *dev = obj->dev;
2549 drm_i915_private_t *dev_priv = dev->dev_private; 2549 drm_i915_private_t *dev_priv = dev->dev_private;
2550 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2550 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2551 2551
2552 if (IS_GEN6(dev)) { 2552 if (IS_GEN6(dev)) {
2553 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2553 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2583,7 +2583,7 @@ int
2583i915_gem_object_put_fence_reg(struct drm_gem_object *obj) 2583i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2584{ 2584{
2585 struct drm_device *dev = obj->dev; 2585 struct drm_device *dev = obj->dev;
2586 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2586 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2587 2587
2588 if (obj_priv->fence_reg == I915_FENCE_REG_NONE) 2588 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2589 return 0; 2589 return 0;
@@ -2621,7 +2621,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2621{ 2621{
2622 struct drm_device *dev = obj->dev; 2622 struct drm_device *dev = obj->dev;
2623 drm_i915_private_t *dev_priv = dev->dev_private; 2623 drm_i915_private_t *dev_priv = dev->dev_private;
2624 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2624 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2625 struct drm_mm_node *free_space; 2625 struct drm_mm_node *free_space;
2626 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2626 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2627 int ret; 2627 int ret;
@@ -2728,7 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2728void 2728void
2729i915_gem_clflush_object(struct drm_gem_object *obj) 2729i915_gem_clflush_object(struct drm_gem_object *obj)
2730{ 2730{
2731 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2731 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2732 2732
2733 /* If we don't have a page list set up, then we're not pinned 2733 /* If we don't have a page list set up, then we're not pinned
2734 * to GPU, and we can ignore the cache flush because it'll happen 2734 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2829,7 +2829,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2829int 2829int
2830i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2830i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2831{ 2831{
2832 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2832 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2833 uint32_t old_write_domain, old_read_domains; 2833 uint32_t old_write_domain, old_read_domains;
2834 int ret; 2834 int ret;
2835 2835
@@ -2879,7 +2879,7 @@ int
2879i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) 2879i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2880{ 2880{
2881 struct drm_device *dev = obj->dev; 2881 struct drm_device *dev = obj->dev;
2882 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2882 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2883 uint32_t old_write_domain, old_read_domains; 2883 uint32_t old_write_domain, old_read_domains;
2884 int ret; 2884 int ret;
2885 2885
@@ -3092,7 +3092,7 @@ static void
3092i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 3092i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3093{ 3093{
3094 struct drm_device *dev = obj->dev; 3094 struct drm_device *dev = obj->dev;
3095 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3095 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3096 uint32_t invalidate_domains = 0; 3096 uint32_t invalidate_domains = 0;
3097 uint32_t flush_domains = 0; 3097 uint32_t flush_domains = 0;
3098 uint32_t old_read_domains; 3098 uint32_t old_read_domains;
@@ -3177,7 +3177,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3177static void 3177static void
3178i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 3178i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3179{ 3179{
3180 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3180 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3181 3181
3182 if (!obj_priv->page_cpu_valid) 3182 if (!obj_priv->page_cpu_valid)
3183 return; 3183 return;
@@ -3217,7 +3217,7 @@ static int
3217i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 3217i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3218 uint64_t offset, uint64_t size) 3218 uint64_t offset, uint64_t size)
3219{ 3219{
3220 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3220 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3221 uint32_t old_read_domains; 3221 uint32_t old_read_domains;
3222 int i, ret; 3222 int i, ret;
3223 3223
@@ -3286,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3286{ 3286{
3287 struct drm_device *dev = obj->dev; 3287 struct drm_device *dev = obj->dev;
3288 drm_i915_private_t *dev_priv = dev->dev_private; 3288 drm_i915_private_t *dev_priv = dev->dev_private;
3289 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3289 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3290 int i, ret; 3290 int i, ret;
3291 void __iomem *reloc_page; 3291 void __iomem *reloc_page;
3292 bool need_fence; 3292 bool need_fence;
@@ -3337,7 +3337,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3337 i915_gem_object_unpin(obj); 3337 i915_gem_object_unpin(obj);
3338 return -EBADF; 3338 return -EBADF;
3339 } 3339 }
3340 target_obj_priv = target_obj->driver_private; 3340 target_obj_priv = to_intel_bo(target_obj);
3341 3341
3342#if WATCH_RELOC 3342#if WATCH_RELOC
3343 DRM_INFO("%s: obj %p offset %08x target %d " 3343 DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3689,7 +3689,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3689 prepare_to_wait(&dev_priv->pending_flip_queue, 3689 prepare_to_wait(&dev_priv->pending_flip_queue,
3690 &wait, TASK_INTERRUPTIBLE); 3690 &wait, TASK_INTERRUPTIBLE);
3691 for (i = 0; i < count; i++) { 3691 for (i = 0; i < count; i++) {
3692 obj_priv = object_list[i]->driver_private; 3692 obj_priv = to_intel_bo(object_list[i]);
3693 if (atomic_read(&obj_priv->pending_flip) > 0) 3693 if (atomic_read(&obj_priv->pending_flip) > 0)
3694 break; 3694 break;
3695 } 3695 }
@@ -3798,7 +3798,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3798 goto err; 3798 goto err;
3799 } 3799 }
3800 3800
3801 obj_priv = object_list[i]->driver_private; 3801 obj_priv = to_intel_bo(object_list[i]);
3802 if (obj_priv->in_execbuffer) { 3802 if (obj_priv->in_execbuffer) {
3803 DRM_ERROR("Object %p appears more than once in object list\n", 3803 DRM_ERROR("Object %p appears more than once in object list\n",
3804 object_list[i]); 3804 object_list[i]);
@@ -3924,7 +3924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3924 3924
3925 for (i = 0; i < args->buffer_count; i++) { 3925 for (i = 0; i < args->buffer_count; i++) {
3926 struct drm_gem_object *obj = object_list[i]; 3926 struct drm_gem_object *obj = object_list[i];
3927 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3927 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3928 uint32_t old_write_domain = obj->write_domain; 3928 uint32_t old_write_domain = obj->write_domain;
3929 3929
3930 obj->write_domain = obj->pending_write_domain; 3930 obj->write_domain = obj->pending_write_domain;
@@ -3999,7 +3999,7 @@ err:
3999 3999
4000 for (i = 0; i < args->buffer_count; i++) { 4000 for (i = 0; i < args->buffer_count; i++) {
4001 if (object_list[i]) { 4001 if (object_list[i]) {
4002 obj_priv = object_list[i]->driver_private; 4002 obj_priv = to_intel_bo(object_list[i]);
4003 obj_priv->in_execbuffer = false; 4003 obj_priv->in_execbuffer = false;
4004 } 4004 }
4005 drm_gem_object_unreference(object_list[i]); 4005 drm_gem_object_unreference(object_list[i]);
@@ -4177,7 +4177,7 @@ int
4177i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 4177i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4178{ 4178{
4179 struct drm_device *dev = obj->dev; 4179 struct drm_device *dev = obj->dev;
4180 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4180 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4181 int ret; 4181 int ret;
4182 4182
4183 i915_verify_inactive(dev, __FILE__, __LINE__); 4183 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4210,7 +4210,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
4210{ 4210{
4211 struct drm_device *dev = obj->dev; 4211 struct drm_device *dev = obj->dev;
4212 drm_i915_private_t *dev_priv = dev->dev_private; 4212 drm_i915_private_t *dev_priv = dev->dev_private;
4213 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4213 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4214 4214
4215 i915_verify_inactive(dev, __FILE__, __LINE__); 4215 i915_verify_inactive(dev, __FILE__, __LINE__);
4216 obj_priv->pin_count--; 4216 obj_priv->pin_count--;
@@ -4250,7 +4250,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4250 mutex_unlock(&dev->struct_mutex); 4250 mutex_unlock(&dev->struct_mutex);
4251 return -EBADF; 4251 return -EBADF;
4252 } 4252 }
4253 obj_priv = obj->driver_private; 4253 obj_priv = to_intel_bo(obj);
4254 4254
4255 if (obj_priv->madv != I915_MADV_WILLNEED) { 4255 if (obj_priv->madv != I915_MADV_WILLNEED) {
4256 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 4256 DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4307,7 +4307,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4307 return -EBADF; 4307 return -EBADF;
4308 } 4308 }
4309 4309
4310 obj_priv = obj->driver_private; 4310 obj_priv = to_intel_bo(obj);
4311 if (obj_priv->pin_filp != file_priv) { 4311 if (obj_priv->pin_filp != file_priv) {
4312 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 4312 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4313 args->handle); 4313 args->handle);
@@ -4349,7 +4349,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4349 */ 4349 */
4350 i915_gem_retire_requests(dev); 4350 i915_gem_retire_requests(dev);
4351 4351
4352 obj_priv = obj->driver_private; 4352 obj_priv = to_intel_bo(obj);
4353 /* Don't count being on the flushing list against the object being 4353 /* Don't count being on the flushing list against the object being
4354 * done. Otherwise, a buffer left on the flushing list but not getting 4354 * done. Otherwise, a buffer left on the flushing list but not getting
4355 * flushed (because nobody's flushing that domain) won't ever return 4355 * flushed (because nobody's flushing that domain) won't ever return
@@ -4395,7 +4395,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4395 } 4395 }
4396 4396
4397 mutex_lock(&dev->struct_mutex); 4397 mutex_lock(&dev->struct_mutex);
4398 obj_priv = obj->driver_private; 4398 obj_priv = to_intel_bo(obj);
4399 4399
4400 if (obj_priv->pin_count) { 4400 if (obj_priv->pin_count) {
4401 drm_gem_object_unreference(obj); 4401 drm_gem_object_unreference(obj);
@@ -4456,7 +4456,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4456void i915_gem_free_object(struct drm_gem_object *obj) 4456void i915_gem_free_object(struct drm_gem_object *obj)
4457{ 4457{
4458 struct drm_device *dev = obj->dev; 4458 struct drm_device *dev = obj->dev;
4459 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4459 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4460 4460
4461 trace_i915_gem_object_destroy(obj); 4461 trace_i915_gem_object_destroy(obj);
4462 4462
@@ -4565,7 +4565,7 @@ i915_gem_init_hws(struct drm_device *dev)
4565 DRM_ERROR("Failed to allocate status page\n"); 4565 DRM_ERROR("Failed to allocate status page\n");
4566 return -ENOMEM; 4566 return -ENOMEM;
4567 } 4567 }
4568 obj_priv = obj->driver_private; 4568 obj_priv = to_intel_bo(obj);
4569 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4569 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4570 4570
4571 ret = i915_gem_object_pin(obj, 4096); 4571 ret = i915_gem_object_pin(obj, 4096);
@@ -4609,7 +4609,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4609 return; 4609 return;
4610 4610
4611 obj = dev_priv->hws_obj; 4611 obj = dev_priv->hws_obj;
4612 obj_priv = obj->driver_private; 4612 obj_priv = to_intel_bo(obj);
4613 4613
4614 kunmap(obj_priv->pages[0]); 4614 kunmap(obj_priv->pages[0]);
4615 i915_gem_object_unpin(obj); 4615 i915_gem_object_unpin(obj);
@@ -4643,7 +4643,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4643 i915_gem_cleanup_hws(dev); 4643 i915_gem_cleanup_hws(dev);
4644 return -ENOMEM; 4644 return -ENOMEM;
4645 } 4645 }
4646 obj_priv = obj->driver_private; 4646 obj_priv = to_intel_bo(obj);
4647 4647
4648 ret = i915_gem_object_pin(obj, 4096); 4648 ret = i915_gem_object_pin(obj, 4096);
4649 if (ret != 0) { 4649 if (ret != 0) {
@@ -4936,7 +4936,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4936 int ret; 4936 int ret;
4937 int page_count; 4937 int page_count;
4938 4938
4939 obj_priv = obj->driver_private; 4939 obj_priv = to_intel_bo(obj);
4940 if (!obj_priv->phys_obj) 4940 if (!obj_priv->phys_obj)
4941 return; 4941 return;
4942 4942
@@ -4975,7 +4975,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4975 if (id > I915_MAX_PHYS_OBJECT) 4975 if (id > I915_MAX_PHYS_OBJECT)
4976 return -EINVAL; 4976 return -EINVAL;
4977 4977
4978 obj_priv = obj->driver_private; 4978 obj_priv = to_intel_bo(obj);
4979 4979
4980 if (obj_priv->phys_obj) { 4980 if (obj_priv->phys_obj) {
4981 if (obj_priv->phys_obj->id == id) 4981 if (obj_priv->phys_obj->id == id)
@@ -5026,7 +5026,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5026 struct drm_i915_gem_pwrite *args, 5026 struct drm_i915_gem_pwrite *args,
5027 struct drm_file *file_priv) 5027 struct drm_file *file_priv)
5028{ 5028{
5029 struct drm_i915_gem_object *obj_priv = obj->driver_private; 5029 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5030 void *obj_addr; 5030 void *obj_addr;
5031 int ret; 5031 int ret;
5032 char __user *user_data; 5032 char __user *user_data;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f8..35507cf53fa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
72i915_gem_dump_object(struct drm_gem_object *obj, int len, 72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark) 73 const char *where, uint32_t mark)
74{ 74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private; 75 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
76 int page; 76 int page;
77 77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
138{ 138{
139 struct drm_device *dev = obj->dev; 139 struct drm_device *dev = obj->dev;
140 struct drm_i915_gem_object *obj_priv = obj->driver_private; 140 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
141 int page; 141 int page;
142 uint32_t *gtt_mapping; 142 uint32_t *gtt_mapping;
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c01c878e51ba..449157f71610 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -240,7 +240,7 @@ bool
240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
241{ 241{
242 struct drm_device *dev = obj->dev; 242 struct drm_device *dev = obj->dev;
243 struct drm_i915_gem_object *obj_priv = obj->driver_private; 243 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
244 244
245 if (obj_priv->gtt_space == NULL) 245 if (obj_priv->gtt_space == NULL)
246 return true; 246 return true;
@@ -280,7 +280,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
280 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 280 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
281 if (obj == NULL) 281 if (obj == NULL)
282 return -EINVAL; 282 return -EINVAL;
283 obj_priv = obj->driver_private; 283 obj_priv = to_intel_bo(obj);
284 284
285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
286 drm_gem_object_unreference_unlocked(obj); 286 drm_gem_object_unreference_unlocked(obj);
@@ -364,7 +364,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
364 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 364 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
365 if (obj == NULL) 365 if (obj == NULL)
366 return -EINVAL; 366 return -EINVAL;
367 obj_priv = obj->driver_private; 367 obj_priv = to_intel_bo(obj);
368 368
369 mutex_lock(&dev->struct_mutex); 369 mutex_lock(&dev->struct_mutex);
370 370
@@ -427,7 +427,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
427{ 427{
428 struct drm_device *dev = obj->dev; 428 struct drm_device *dev = obj->dev;
429 drm_i915_private_t *dev_priv = dev->dev_private; 429 drm_i915_private_t *dev_priv = dev->dev_private;
430 struct drm_i915_gem_object *obj_priv = obj->driver_private; 430 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
431 int page_count = obj->size >> PAGE_SHIFT; 431 int page_count = obj->size >> PAGE_SHIFT;
432 int i; 432 int i;
433 433
@@ -456,7 +456,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
456{ 456{
457 struct drm_device *dev = obj->dev; 457 struct drm_device *dev = obj->dev;
458 drm_i915_private_t *dev_priv = dev->dev_private; 458 drm_i915_private_t *dev_priv = dev->dev_private;
459 struct drm_i915_gem_object *obj_priv = obj->driver_private; 459 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
460 int page_count = obj->size >> PAGE_SHIFT; 460 int page_count = obj->size >> PAGE_SHIFT;
461 int i; 461 int i;
462 462
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49c458bc6502..6421481d6222 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
260 260
261 if (mode_config->num_connector) { 261 if (mode_config->num_connector) {
262 list_for_each_entry(connector, &mode_config->connector_list, head) { 262 list_for_each_entry(connector, &mode_config->connector_list, head) {
263 struct intel_output *intel_output = to_intel_output(connector); 263 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
264 264
265 if (intel_output->hot_plug) 265 if (intel_encoder->hot_plug)
266 (*intel_output->hot_plug) (intel_output); 266 (*intel_encoder->hot_plug) (intel_encoder);
267 } 267 }
268 } 268 }
269 /* Just fire off a uevent and let userspace tell us what to do */ 269 /* Just fire off a uevent and let userspace tell us what to do */
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
444 if (src == NULL) 444 if (src == NULL)
445 return NULL; 445 return NULL;
446 446
447 src_priv = src->driver_private; 447 src_priv = to_intel_bo(src);
448 if (src_priv->pages == NULL) 448 if (src_priv->pages == NULL)
449 return NULL; 449 return NULL;
450 450
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 38110ce742a5..759c2ef72eff 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
247 247
248static bool intel_crt_detect_ddc(struct drm_connector *connector) 248static bool intel_crt_detect_ddc(struct drm_connector *connector)
249{ 249{
250 struct intel_output *intel_output = to_intel_output(connector); 250 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
251 251
252 /* CRT should always be at 0, but check anyway */ 252 /* CRT should always be at 0, but check anyway */
253 if (intel_output->type != INTEL_OUTPUT_ANALOG) 253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
254 return false; 254 return false;
255 255
256 return intel_ddc_probe(intel_output); 256 return intel_ddc_probe(intel_encoder);
257} 257}
258 258
259static enum drm_connector_status 259static enum drm_connector_status
260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) 260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
261{ 261{
262 struct drm_encoder *encoder = &intel_output->enc; 262 struct drm_encoder *encoder = &intel_encoder->enc;
263 struct drm_device *dev = encoder->dev; 263 struct drm_device *dev = encoder->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 264 struct drm_i915_private *dev_priv = dev->dev_private;
265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
388{ 388{
389 struct drm_device *dev = connector->dev; 389 struct drm_device *dev = connector->dev;
390 struct intel_output *intel_output = to_intel_output(connector); 390 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
391 struct drm_encoder *encoder = &intel_output->enc; 391 struct drm_encoder *encoder = &intel_encoder->enc;
392 struct drm_crtc *crtc; 392 struct drm_crtc *crtc;
393 int dpms_mode; 393 int dpms_mode;
394 enum drm_connector_status status; 394 enum drm_connector_status status;
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
405 405
406 /* for pre-945g platforms use load detect */ 406 /* for pre-945g platforms use load detect */
407 if (encoder->crtc && encoder->crtc->enabled) { 407 if (encoder->crtc && encoder->crtc->enabled) {
408 status = intel_crt_load_detect(encoder->crtc, intel_output); 408 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
409 } else { 409 } else {
410 crtc = intel_get_load_detect_pipe(intel_output, 410 crtc = intel_get_load_detect_pipe(intel_encoder,
411 NULL, &dpms_mode); 411 NULL, &dpms_mode);
412 if (crtc) { 412 if (crtc) {
413 status = intel_crt_load_detect(crtc, intel_output); 413 status = intel_crt_load_detect(crtc, intel_encoder);
414 intel_release_load_detect_pipe(intel_output, dpms_mode); 414 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
415 } else 415 } else
416 status = connector_status_unknown; 416 status = connector_status_unknown;
417 } 417 }
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
421 421
422static void intel_crt_destroy(struct drm_connector *connector) 422static void intel_crt_destroy(struct drm_connector *connector)
423{ 423{
424 struct intel_output *intel_output = to_intel_output(connector); 424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
425 425
426 intel_i2c_destroy(intel_output->ddc_bus); 426 intel_i2c_destroy(intel_encoder->ddc_bus);
427 drm_sysfs_connector_remove(connector); 427 drm_sysfs_connector_remove(connector);
428 drm_connector_cleanup(connector); 428 drm_connector_cleanup(connector);
429 kfree(connector); 429 kfree(connector);
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
432static int intel_crt_get_modes(struct drm_connector *connector) 432static int intel_crt_get_modes(struct drm_connector *connector)
433{ 433{
434 int ret; 434 int ret;
435 struct intel_output *intel_output = to_intel_output(connector); 435 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
436 struct i2c_adapter *ddcbus; 436 struct i2c_adapter *ddcbus;
437 struct drm_device *dev = connector->dev; 437 struct drm_device *dev = connector->dev;
438 438
439 439
440 ret = intel_ddc_get_modes(intel_output); 440 ret = intel_ddc_get_modes(intel_encoder);
441 if (ret || !IS_G4X(dev)) 441 if (ret || !IS_G4X(dev))
442 goto end; 442 goto end;
443 443
444 ddcbus = intel_output->ddc_bus; 444 ddcbus = intel_encoder->ddc_bus;
445 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 445 /* Try to probe digital port for output in DVI-I -> VGA mode. */
446 intel_output->ddc_bus = 446 intel_encoder->ddc_bus =
447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); 447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
448 448
449 if (!intel_output->ddc_bus) { 449 if (!intel_encoder->ddc_bus) {
450 intel_output->ddc_bus = ddcbus; 450 intel_encoder->ddc_bus = ddcbus;
451 dev_printk(KERN_ERR, &connector->dev->pdev->dev, 451 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
452 "DDC bus registration failed for CRTDDC_D.\n"); 452 "DDC bus registration failed for CRTDDC_D.\n");
453 goto end; 453 goto end;
454 } 454 }
455 /* Try to get modes by GPIOD port */ 455 /* Try to get modes by GPIOD port */
456 ret = intel_ddc_get_modes(intel_output); 456 ret = intel_ddc_get_modes(intel_encoder);
457 intel_i2c_destroy(ddcbus); 457 intel_i2c_destroy(ddcbus);
458 458
459end: 459end:
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
506void intel_crt_init(struct drm_device *dev) 506void intel_crt_init(struct drm_device *dev)
507{ 507{
508 struct drm_connector *connector; 508 struct drm_connector *connector;
509 struct intel_output *intel_output; 509 struct intel_encoder *intel_encoder;
510 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = dev->dev_private;
511 u32 i2c_reg; 511 u32 i2c_reg;
512 512
513 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 513 intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
514 if (!intel_output) 514 if (!intel_encoder)
515 return; 515 return;
516 516
517 connector = &intel_output->base; 517 connector = &intel_encoder->base;
518 drm_connector_init(dev, &intel_output->base, 518 drm_connector_init(dev, &intel_encoder->base,
519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
520 520
521 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, 521 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
522 DRM_MODE_ENCODER_DAC); 522 DRM_MODE_ENCODER_DAC);
523 523
524 drm_mode_connector_attach_encoder(&intel_output->base, 524 drm_mode_connector_attach_encoder(&intel_encoder->base,
525 &intel_output->enc); 525 &intel_encoder->enc);
526 526
527 /* Set up the DDC bus. */ 527 /* Set up the DDC bus. */
528 if (HAS_PCH_SPLIT(dev)) 528 if (HAS_PCH_SPLIT(dev))
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev)
533 if (dev_priv->crt_ddc_bus != 0) 533 if (dev_priv->crt_ddc_bus != 0)
534 i2c_reg = dev_priv->crt_ddc_bus; 534 i2c_reg = dev_priv->crt_ddc_bus;
535 } 535 }
536 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); 536 intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
537 if (!intel_output->ddc_bus) { 537 if (!intel_encoder->ddc_bus) {
538 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 538 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
539 "failed.\n"); 539 "failed.\n");
540 return; 540 return;
541 } 541 }
542 542
543 intel_output->type = INTEL_OUTPUT_ANALOG; 543 intel_encoder->type = INTEL_OUTPUT_ANALOG;
544 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 544 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
545 (1 << INTEL_ANALOG_CLONE_BIT) | 545 (1 << INTEL_ANALOG_CLONE_BIT) |
546 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 546 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
547 intel_output->crtc_mask = (1 << 0) | (1 << 1); 547 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
548 connector->interlace_allowed = 0; 548 connector->interlace_allowed = 0;
549 connector->doublescan_allowed = 0; 549 connector->doublescan_allowed = 0;
550 550
551 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); 551 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
552 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 552 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
553 553
554 drm_sysfs_connector_add(connector); 554 drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7e753b2845f..e7356fb6c918 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
747 list_for_each_entry(l_entry, &mode_config->connector_list, head) { 747 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
748 if (l_entry->encoder && 748 if (l_entry->encoder &&
749 l_entry->encoder->crtc == crtc) { 749 l_entry->encoder->crtc == crtc) {
750 struct intel_output *intel_output = to_intel_output(l_entry); 750 struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
751 if (intel_output->type == type) 751 if (intel_encoder->type == type)
752 return true; 752 return true;
753 } 753 }
754 } 754 }
755 return false; 755 return false;
756} 756}
757 757
758struct drm_connector * 758static struct drm_connector *
759intel_pipe_get_output (struct drm_crtc *crtc) 759intel_pipe_get_connector (struct drm_crtc *crtc)
760{ 760{
761 struct drm_device *dev = crtc->dev; 761 struct drm_device *dev = crtc->dev;
762 struct drm_mode_config *mode_config = &dev->mode_config; 762 struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1003 struct drm_i915_private *dev_priv = dev->dev_private; 1003 struct drm_i915_private *dev_priv = dev->dev_private;
1004 struct drm_framebuffer *fb = crtc->fb; 1004 struct drm_framebuffer *fb = crtc->fb;
1005 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1005 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1006 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1006 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1008 int plane, i; 1008 int plane, i;
1009 u32 fbc_ctl, fbc_ctl2; 1009 u32 fbc_ctl, fbc_ctl2;
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1080 struct drm_i915_private *dev_priv = dev->dev_private; 1080 struct drm_i915_private *dev_priv = dev->dev_private;
1081 struct drm_framebuffer *fb = crtc->fb; 1081 struct drm_framebuffer *fb = crtc->fb;
1082 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1082 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1083 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1083 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1085 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : 1085 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
1086 DPFC_CTL_PLANEB); 1086 DPFC_CTL_PLANEB);
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1176 return; 1176 return;
1177 1177
1178 intel_fb = to_intel_framebuffer(fb); 1178 intel_fb = to_intel_framebuffer(fb);
1179 obj_priv = intel_fb->obj->driver_private; 1179 obj_priv = to_intel_bo(intel_fb->obj);
1180 1180
1181 /* 1181 /*
1182 * If FBC is already on, we just have to verify that we can 1182 * If FBC is already on, we just have to verify that we can
@@ -1243,7 +1243,7 @@ out_disable:
1243static int 1243static int
1244intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1244intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1245{ 1245{
1246 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1246 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1247 u32 alignment; 1247 u32 alignment;
1248 int ret; 1248 int ret;
1249 1249
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1323 1323
1324 intel_fb = to_intel_framebuffer(crtc->fb); 1324 intel_fb = to_intel_framebuffer(crtc->fb);
1325 obj = intel_fb->obj; 1325 obj = intel_fb->obj;
1326 obj_priv = obj->driver_private; 1326 obj_priv = to_intel_bo(obj);
1327 1327
1328 mutex_lock(&dev->struct_mutex); 1328 mutex_lock(&dev->struct_mutex);
1329 ret = intel_pin_and_fence_fb_obj(dev, obj); 1329 ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1401 1401
1402 if (old_fb) { 1402 if (old_fb) {
1403 intel_fb = to_intel_framebuffer(old_fb); 1403 intel_fb = to_intel_framebuffer(old_fb);
1404 obj_priv = intel_fb->obj->driver_private; 1404 obj_priv = to_intel_bo(intel_fb->obj);
1405 i915_gem_object_unpin(intel_fb->obj); 1405 i915_gem_object_unpin(intel_fb->obj);
1406 } 1406 }
1407 intel_increase_pllclock(crtc, true); 1407 intel_increase_pllclock(crtc, true);
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2917 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; 2917 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
2918 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; 2918 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
2919 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2919 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
2920 int refclk, num_outputs = 0; 2920 int refclk, num_connectors = 0;
2921 intel_clock_t clock, reduced_clock; 2921 intel_clock_t clock, reduced_clock;
2922 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 2922 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
2923 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 2923 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2943 drm_vblank_pre_modeset(dev, pipe); 2943 drm_vblank_pre_modeset(dev, pipe);
2944 2944
2945 list_for_each_entry(connector, &mode_config->connector_list, head) { 2945 list_for_each_entry(connector, &mode_config->connector_list, head) {
2946 struct intel_output *intel_output = to_intel_output(connector); 2946 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2947 2947
2948 if (!connector->encoder || connector->encoder->crtc != crtc) 2948 if (!connector->encoder || connector->encoder->crtc != crtc)
2949 continue; 2949 continue;
2950 2950
2951 switch (intel_output->type) { 2951 switch (intel_encoder->type) {
2952 case INTEL_OUTPUT_LVDS: 2952 case INTEL_OUTPUT_LVDS:
2953 is_lvds = true; 2953 is_lvds = true;
2954 break; 2954 break;
2955 case INTEL_OUTPUT_SDVO: 2955 case INTEL_OUTPUT_SDVO:
2956 case INTEL_OUTPUT_HDMI: 2956 case INTEL_OUTPUT_HDMI:
2957 is_sdvo = true; 2957 is_sdvo = true;
2958 if (intel_output->needs_tv_clock) 2958 if (intel_encoder->needs_tv_clock)
2959 is_tv = true; 2959 is_tv = true;
2960 break; 2960 break;
2961 case INTEL_OUTPUT_DVO: 2961 case INTEL_OUTPUT_DVO:
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2975 break; 2975 break;
2976 } 2976 }
2977 2977
2978 num_outputs++; 2978 num_connectors++;
2979 } 2979 }
2980 2980
2981 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { 2981 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
2982 refclk = dev_priv->lvds_ssc_freq * 1000; 2982 refclk = dev_priv->lvds_ssc_freq * 1000;
2983 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 2983 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
2984 refclk / 1000); 2984 refclk / 1000);
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3049 if (is_edp) { 3049 if (is_edp) {
3050 struct drm_connector *edp; 3050 struct drm_connector *edp;
3051 target_clock = mode->clock; 3051 target_clock = mode->clock;
3052 edp = intel_pipe_get_output(crtc); 3052 edp = intel_pipe_get_connector(crtc);
3053 intel_edp_link_config(to_intel_output(edp), 3053 intel_edp_link_config(to_intel_encoder(edp),
3054 &lane, &link_bw); 3054 &lane, &link_bw);
3055 } else { 3055 } else {
3056 /* DP over FDI requires target mode clock 3056 /* DP over FDI requires target mode clock
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3231 /* XXX: just matching BIOS for now */ 3231 /* XXX: just matching BIOS for now */
3232 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 3232 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3233 dpll |= 3; 3233 dpll |= 3;
3234 else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) 3234 else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
3235 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 3235 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3236 else 3236 else
3237 dpll |= PLL_REF_INPUT_DREFCLK; 3237 dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3511 if (!bo) 3511 if (!bo)
3512 return -ENOENT; 3512 return -ENOENT;
3513 3513
3514 obj_priv = bo->driver_private; 3514 obj_priv = to_intel_bo(bo);
3515 3515
3516 if (bo->size < width * height * 4) { 3516 if (bo->size < width * height * 4) {
3517 DRM_ERROR("buffer is to small\n"); 3517 DRM_ERROR("buffer is to small\n");
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
3655 * detection. 3655 * detection.
3656 * 3656 *
3657 * It will be up to the load-detect code to adjust the pipe as appropriate for 3657 * It will be up to the load-detect code to adjust the pipe as appropriate for
3658 * its requirements. The pipe will be connected to no other outputs. 3658 * its requirements. The pipe will be connected to no other encoders.
3659 * 3659 *
3660 * Currently this code will only succeed if there is a pipe with no outputs 3660 * Currently this code will only succeed if there is a pipe with no encoders
3661 * configured for it. In the future, it could choose to temporarily disable 3661 * configured for it. In the future, it could choose to temporarily disable
3662 * some outputs to free up a pipe for its use. 3662 * some outputs to free up a pipe for its use.
3663 * 3663 *
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
3670 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 3670 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
3671}; 3671};
3672 3672
3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3674 struct drm_display_mode *mode, 3674 struct drm_display_mode *mode,
3675 int *dpms_mode) 3675 int *dpms_mode)
3676{ 3676{
3677 struct intel_crtc *intel_crtc; 3677 struct intel_crtc *intel_crtc;
3678 struct drm_crtc *possible_crtc; 3678 struct drm_crtc *possible_crtc;
3679 struct drm_crtc *supported_crtc =NULL; 3679 struct drm_crtc *supported_crtc =NULL;
3680 struct drm_encoder *encoder = &intel_output->enc; 3680 struct drm_encoder *encoder = &intel_encoder->enc;
3681 struct drm_crtc *crtc = NULL; 3681 struct drm_crtc *crtc = NULL;
3682 struct drm_device *dev = encoder->dev; 3682 struct drm_device *dev = encoder->dev;
3683 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3683 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3729 } 3729 }
3730 3730
3731 encoder->crtc = crtc; 3731 encoder->crtc = crtc;
3732 intel_output->base.encoder = encoder; 3732 intel_encoder->base.encoder = encoder;
3733 intel_output->load_detect_temp = true; 3733 intel_encoder->load_detect_temp = true;
3734 3734
3735 intel_crtc = to_intel_crtc(crtc); 3735 intel_crtc = to_intel_crtc(crtc);
3736 *dpms_mode = intel_crtc->dpms_mode; 3736 *dpms_mode = intel_crtc->dpms_mode;
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3755 return crtc; 3755 return crtc;
3756} 3756}
3757 3757
3758void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) 3758void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
3759{ 3759{
3760 struct drm_encoder *encoder = &intel_output->enc; 3760 struct drm_encoder *encoder = &intel_encoder->enc;
3761 struct drm_device *dev = encoder->dev; 3761 struct drm_device *dev = encoder->dev;
3762 struct drm_crtc *crtc = encoder->crtc; 3762 struct drm_crtc *crtc = encoder->crtc;
3763 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3763 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3764 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3764 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3765 3765
3766 if (intel_output->load_detect_temp) { 3766 if (intel_encoder->load_detect_temp) {
3767 encoder->crtc = NULL; 3767 encoder->crtc = NULL;
3768 intel_output->base.encoder = NULL; 3768 intel_encoder->base.encoder = NULL;
3769 intel_output->load_detect_temp = false; 3769 intel_encoder->load_detect_temp = false;
3770 crtc->enabled = drm_helper_crtc_in_use(crtc); 3770 crtc->enabled = drm_helper_crtc_in_use(crtc);
3771 drm_helper_disable_unused_functions(dev); 3771 drm_helper_disable_unused_functions(dev);
3772 } 3772 }
3773 3773
3774 /* Switch crtc and output back off if necessary */ 3774 /* Switch crtc and encoder back off if necessary */
3775 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 3775 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
3776 if (encoder->crtc == crtc) 3776 if (encoder->crtc == crtc)
3777 encoder_funcs->dpms(encoder, dpms_mode); 3777 encoder_funcs->dpms(encoder, dpms_mode);
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4156 work = intel_crtc->unpin_work; 4156 work = intel_crtc->unpin_work;
4157 if (work == NULL || !work->pending) { 4157 if (work == NULL || !work->pending) {
4158 if (work && !work->pending) { 4158 if (work && !work->pending) {
4159 obj_priv = work->pending_flip_obj->driver_private; 4159 obj_priv = to_intel_bo(work->pending_flip_obj);
4160 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 4160 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4161 obj_priv, 4161 obj_priv,
4162 atomic_read(&obj_priv->pending_flip)); 4162 atomic_read(&obj_priv->pending_flip));
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4181 4181
4182 spin_unlock_irqrestore(&dev->event_lock, flags); 4182 spin_unlock_irqrestore(&dev->event_lock, flags);
4183 4183
4184 obj_priv = work->pending_flip_obj->driver_private; 4184 obj_priv = to_intel_bo(work->pending_flip_obj);
4185 4185
4186 /* Initial scanout buffer will have a 0 pending flip count */ 4186 /* Initial scanout buffer will have a 0 pending flip count */
4187 if ((atomic_read(&obj_priv->pending_flip) == 0) || 4187 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4252 ret = intel_pin_and_fence_fb_obj(dev, obj); 4252 ret = intel_pin_and_fence_fb_obj(dev, obj);
4253 if (ret != 0) { 4253 if (ret != 0) {
4254 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4254 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4255 obj->driver_private); 4255 to_intel_bo(obj));
4256 kfree(work); 4256 kfree(work);
4257 intel_crtc->unpin_work = NULL; 4257 intel_crtc->unpin_work = NULL;
4258 mutex_unlock(&dev->struct_mutex); 4258 mutex_unlock(&dev->struct_mutex);
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4266 crtc->fb = fb; 4266 crtc->fb = fb;
4267 i915_gem_object_flush_write_domain(obj); 4267 i915_gem_object_flush_write_domain(obj);
4268 drm_vblank_get(dev, intel_crtc->pipe); 4268 drm_vblank_get(dev, intel_crtc->pipe);
4269 obj_priv = obj->driver_private; 4269 obj_priv = to_intel_bo(obj);
4270 atomic_inc(&obj_priv->pending_flip); 4270 atomic_inc(&obj_priv->pending_flip);
4271 work->pending_flip_obj = obj; 4271 work->pending_flip_obj = obj;
4272 4272
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
4399 int entry = 0; 4399 int entry = 0;
4400 4400
4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4402 struct intel_output *intel_output = to_intel_output(connector); 4402 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4403 if (type_mask & intel_output->clone_mask) 4403 if (type_mask & intel_encoder->clone_mask)
4404 index_mask |= (1 << entry); 4404 index_mask |= (1 << entry);
4405 entry++; 4405 entry++;
4406 } 4406 }
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev)
4495 intel_tv_init(dev); 4495 intel_tv_init(dev);
4496 4496
4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4498 struct intel_output *intel_output = to_intel_output(connector); 4498 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4499 struct drm_encoder *encoder = &intel_output->enc; 4499 struct drm_encoder *encoder = &intel_encoder->enc;
4500 4500
4501 encoder->possible_crtcs = intel_output->crtc_mask; 4501 encoder->possible_crtcs = intel_encoder->crtc_mask;
4502 encoder->possible_clones = intel_connector_clones(dev, 4502 encoder->possible_clones = intel_connector_clones(dev,
4503 intel_output->clone_mask); 4503 intel_encoder->clone_mask);
4504 } 4504 }
4505} 4505}
4506 4506
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev)
4779 struct drm_i915_gem_object *obj_priv = NULL; 4779 struct drm_i915_gem_object *obj_priv = NULL;
4780 4780
4781 if (dev_priv->pwrctx) { 4781 if (dev_priv->pwrctx) {
4782 obj_priv = dev_priv->pwrctx->driver_private; 4782 obj_priv = to_intel_bo(dev_priv->pwrctx);
4783 } else { 4783 } else {
4784 struct drm_gem_object *pwrctx; 4784 struct drm_gem_object *pwrctx;
4785 4785
4786 pwrctx = intel_alloc_power_context(dev); 4786 pwrctx = intel_alloc_power_context(dev);
4787 if (pwrctx) { 4787 if (pwrctx) {
4788 dev_priv->pwrctx = pwrctx; 4788 dev_priv->pwrctx = pwrctx;
4789 obj_priv = pwrctx->driver_private; 4789 obj_priv = to_intel_bo(pwrctx);
4790 } 4790 }
4791 } 4791 }
4792 4792
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4816 dev_priv->display.enable_fbc = g4x_enable_fbc; 4816 dev_priv->display.enable_fbc = g4x_enable_fbc;
4817 dev_priv->display.disable_fbc = g4x_disable_fbc; 4817 dev_priv->display.disable_fbc = g4x_disable_fbc;
4818 } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { 4818 } else if (IS_I965GM(dev)) {
4819 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 4819 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4820 dev_priv->display.enable_fbc = i8xx_enable_fbc; 4820 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4821 dev_priv->display.disable_fbc = i8xx_disable_fbc; 4821 dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4957,7 +4957,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
4957 if (dev_priv->pwrctx) { 4957 if (dev_priv->pwrctx) {
4958 struct drm_i915_gem_object *obj_priv; 4958 struct drm_i915_gem_object *obj_priv;
4959 4959
4960 obj_priv = dev_priv->pwrctx->driver_private; 4960 obj_priv = to_intel_bo(dev_priv->pwrctx);
4961 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); 4961 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
4962 I915_READ(PWRCTXA); 4962 I915_READ(PWRCTXA);
4963 i915_gem_object_unpin(dev_priv->pwrctx); 4963 i915_gem_object_unpin(dev_priv->pwrctx);
@@ -4978,9 +4978,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4978*/ 4978*/
4979struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 4979struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
4980{ 4980{
4981 struct intel_output *intel_output = to_intel_output(connector); 4981 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4982 4982
4983 return &intel_output->enc; 4983 return &intel_encoder->enc;
4984} 4984}
4985 4985
4986/* 4986/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e283f75941d..77e40cfcf216 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -55,23 +55,23 @@ struct intel_dp_priv {
55 uint8_t link_bw; 55 uint8_t link_bw;
56 uint8_t lane_count; 56 uint8_t lane_count;
57 uint8_t dpcd[4]; 57 uint8_t dpcd[4];
58 struct intel_output *intel_output; 58 struct intel_encoder *intel_encoder;
59 struct i2c_adapter adapter; 59 struct i2c_adapter adapter;
60 struct i2c_algo_dp_aux_data algo; 60 struct i2c_algo_dp_aux_data algo;
61}; 61};
62 62
63static void 63static void
64intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 64intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
65 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); 65 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
66 66
67static void 67static void
68intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); 68intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
69 69
70void 70void
71intel_edp_link_config (struct intel_output *intel_output, 71intel_edp_link_config (struct intel_encoder *intel_encoder,
72 int *lane_num, int *link_bw) 72 int *lane_num, int *link_bw)
73{ 73{
74 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 74 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
75 75
76 *lane_num = dp_priv->lane_count; 76 *lane_num = dp_priv->lane_count;
77 if (dp_priv->link_bw == DP_LINK_BW_1_62) 77 if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
81} 81}
82 82
83static int 83static int
84intel_dp_max_lane_count(struct intel_output *intel_output) 84intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
85{ 85{
86 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 86 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
87 int max_lane_count = 4; 87 int max_lane_count = 4;
88 88
89 if (dp_priv->dpcd[0] >= 0x11) { 89 if (dp_priv->dpcd[0] >= 0x11) {
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
99} 99}
100 100
101static int 101static int
102intel_dp_max_link_bw(struct intel_output *intel_output) 102intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
103{ 103{
104 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 104 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
105 int max_link_bw = dp_priv->dpcd[1]; 105 int max_link_bw = dp_priv->dpcd[1];
106 106
107 switch (max_link_bw) { 107 switch (max_link_bw) {
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw)
127/* I think this is a fiction */ 127/* I think this is a fiction */
128static int 128static int
129intel_dp_link_required(struct drm_device *dev, 129intel_dp_link_required(struct drm_device *dev,
130 struct intel_output *intel_output, int pixel_clock) 130 struct intel_encoder *intel_encoder, int pixel_clock)
131{ 131{
132 struct drm_i915_private *dev_priv = dev->dev_private; 132 struct drm_i915_private *dev_priv = dev->dev_private;
133 133
134 if (IS_eDP(intel_output)) 134 if (IS_eDP(intel_encoder))
135 return (pixel_clock * dev_priv->edp_bpp) / 8; 135 return (pixel_clock * dev_priv->edp_bpp) / 8;
136 else 136 else
137 return pixel_clock * 3; 137 return pixel_clock * 3;
@@ -141,11 +141,11 @@ static int
141intel_dp_mode_valid(struct drm_connector *connector, 141intel_dp_mode_valid(struct drm_connector *connector,
142 struct drm_display_mode *mode) 142 struct drm_display_mode *mode)
143{ 143{
144 struct intel_output *intel_output = to_intel_output(connector); 144 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
146 int max_lanes = intel_dp_max_lane_count(intel_output); 146 int max_lanes = intel_dp_max_lane_count(intel_encoder);
147 147
148 if (intel_dp_link_required(connector->dev, intel_output, mode->clock) 148 if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
149 > max_link_clock * max_lanes) 149 > max_link_clock * max_lanes)
150 return MODE_CLOCK_HIGH; 150 return MODE_CLOCK_HIGH;
151 151
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
209} 209}
210 210
211static int 211static int
212intel_dp_aux_ch(struct intel_output *intel_output, 212intel_dp_aux_ch(struct intel_encoder *intel_encoder,
213 uint8_t *send, int send_bytes, 213 uint8_t *send, int send_bytes,
214 uint8_t *recv, int recv_size) 214 uint8_t *recv, int recv_size)
215{ 215{
216 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 216 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
217 uint32_t output_reg = dp_priv->output_reg; 217 uint32_t output_reg = dp_priv->output_reg;
218 struct drm_device *dev = intel_output->base.dev; 218 struct drm_device *dev = intel_encoder->base.dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = dev->dev_private;
220 uint32_t ch_ctl = output_reg + 0x10; 220 uint32_t ch_ctl = output_reg + 0x10;
221 uint32_t ch_data = ch_ctl + 4; 221 uint32_t ch_data = ch_ctl + 4;
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
230 * and would like to run at 2MHz. So, take the 230 * and would like to run at 2MHz. So, take the
231 * hrawclk value and divide by 2 and use that 231 * hrawclk value and divide by 2 and use that
232 */ 232 */
233 if (IS_eDP(intel_output)) 233 if (IS_eDP(intel_encoder))
234 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 234 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
235 else if (HAS_PCH_SPLIT(dev)) 235 else if (HAS_PCH_SPLIT(dev))
236 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ 236 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
313 313
314/* Write data to the aux channel in native mode */ 314/* Write data to the aux channel in native mode */
315static int 315static int
316intel_dp_aux_native_write(struct intel_output *intel_output, 316intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
317 uint16_t address, uint8_t *send, int send_bytes) 317 uint16_t address, uint8_t *send, int send_bytes)
318{ 318{
319 int ret; 319 int ret;
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
330 memcpy(&msg[4], send, send_bytes); 330 memcpy(&msg[4], send, send_bytes);
331 msg_bytes = send_bytes + 4; 331 msg_bytes = send_bytes + 4;
332 for (;;) { 332 for (;;) {
333 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); 333 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
334 if (ret < 0) 334 if (ret < 0)
335 return ret; 335 return ret;
336 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 336 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
345 345
346/* Write a single byte to the aux channel in native mode */ 346/* Write a single byte to the aux channel in native mode */
347static int 347static int
348intel_dp_aux_native_write_1(struct intel_output *intel_output, 348intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
349 uint16_t address, uint8_t byte) 349 uint16_t address, uint8_t byte)
350{ 350{
351 return intel_dp_aux_native_write(intel_output, address, &byte, 1); 351 return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
352} 352}
353 353
354/* read bytes from a native aux channel */ 354/* read bytes from a native aux channel */
355static int 355static int
356intel_dp_aux_native_read(struct intel_output *intel_output, 356intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
357 uint16_t address, uint8_t *recv, int recv_bytes) 357 uint16_t address, uint8_t *recv, int recv_bytes)
358{ 358{
359 uint8_t msg[4]; 359 uint8_t msg[4];
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
372 reply_bytes = recv_bytes + 1; 372 reply_bytes = recv_bytes + 1;
373 373
374 for (;;) { 374 for (;;) {
375 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, 375 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
376 reply, reply_bytes); 376 reply, reply_bytes);
377 if (ret == 0) 377 if (ret == 0)
378 return -EPROTO; 378 return -EPROTO;
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
398 struct intel_dp_priv *dp_priv = container_of(adapter, 398 struct intel_dp_priv *dp_priv = container_of(adapter,
399 struct intel_dp_priv, 399 struct intel_dp_priv,
400 adapter); 400 adapter);
401 struct intel_output *intel_output = dp_priv->intel_output; 401 struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
402 uint16_t address = algo_data->address; 402 uint16_t address = algo_data->address;
403 uint8_t msg[5]; 403 uint8_t msg[5];
404 uint8_t reply[2]; 404 uint8_t reply[2];
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
437 } 437 }
438 438
439 for (;;) { 439 for (;;) {
440 ret = intel_dp_aux_ch(intel_output, 440 ret = intel_dp_aux_ch(intel_encoder,
441 msg, msg_bytes, 441 msg, msg_bytes,
442 reply, reply_bytes); 442 reply, reply_bytes);
443 if (ret < 0) { 443 if (ret < 0) {
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
465} 465}
466 466
467static int 467static int
468intel_dp_i2c_init(struct intel_output *intel_output, const char *name) 468intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
469{ 469{
470 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 470 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
471 471
472 DRM_DEBUG_KMS("i2c_init %s\n", name); 472 DRM_DEBUG_KMS("i2c_init %s\n", name);
473 dp_priv->algo.running = false; 473 dp_priv->algo.running = false;
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
482 dp_priv->adapter.algo_data = &dp_priv->algo; 482 dp_priv->adapter.algo_data = &dp_priv->algo;
483 dp_priv->adapter.dev.parent = &intel_output->base.kdev; 483 dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
484 484
485 return i2c_dp_aux_add_bus(&dp_priv->adapter); 485 return i2c_dp_aux_add_bus(&dp_priv->adapter);
486} 486}
@@ -489,18 +489,18 @@ static bool
489intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, 489intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
490 struct drm_display_mode *adjusted_mode) 490 struct drm_display_mode *adjusted_mode)
491{ 491{
492 struct intel_output *intel_output = enc_to_intel_output(encoder); 492 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
493 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 493 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
494 int lane_count, clock; 494 int lane_count, clock;
495 int max_lane_count = intel_dp_max_lane_count(intel_output); 495 int max_lane_count = intel_dp_max_lane_count(intel_encoder);
496 int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; 496 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
497 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 497 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
498 498
499 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 499 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
500 for (clock = 0; clock <= max_clock; clock++) { 500 for (clock = 0; clock <= max_clock; clock++) {
501 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 501 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
502 502
503 if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) 503 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
504 <= link_avail) { 504 <= link_avail) {
505 dp_priv->link_bw = bws[clock]; 505 dp_priv->link_bw = bws[clock];
506 dp_priv->lane_count = lane_count; 506 dp_priv->lane_count = lane_count;
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
562 struct intel_dp_m_n m_n; 562 struct intel_dp_m_n m_n;
563 563
564 /* 564 /*
565 * Find the lane count in the intel_output private 565 * Find the lane count in the intel_encoder private
566 */ 566 */
567 list_for_each_entry(connector, &mode_config->connector_list, head) { 567 list_for_each_entry(connector, &mode_config->connector_list, head) {
568 struct intel_output *intel_output = to_intel_output(connector); 568 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
569 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 569 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
570 570
571 if (!connector->encoder || connector->encoder->crtc != crtc) 571 if (!connector->encoder || connector->encoder->crtc != crtc)
572 continue; 572 continue;
573 573
574 if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { 574 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
575 lane_count = dp_priv->lane_count; 575 lane_count = dp_priv->lane_count;
576 break; 576 break;
577 } 577 }
@@ -626,9 +626,9 @@ static void
626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
627 struct drm_display_mode *adjusted_mode) 627 struct drm_display_mode *adjusted_mode)
628{ 628{
629 struct intel_output *intel_output = enc_to_intel_output(encoder); 629 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
630 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 630 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
631 struct drm_crtc *crtc = intel_output->enc.crtc; 631 struct drm_crtc *crtc = intel_encoder->enc.crtc;
632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
633 633
634 dp_priv->DP = (DP_LINK_TRAIN_OFF | 634 dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
667 if (intel_crtc->pipe == 1) 667 if (intel_crtc->pipe == 1)
668 dp_priv->DP |= DP_PIPEB_SELECT; 668 dp_priv->DP |= DP_PIPEB_SELECT;
669 669
670 if (IS_eDP(intel_output)) { 670 if (IS_eDP(intel_encoder)) {
671 /* don't miss out required setting for eDP */ 671 /* don't miss out required setting for eDP */
672 dp_priv->DP |= DP_PLL_ENABLE; 672 dp_priv->DP |= DP_PLL_ENABLE;
673 if (adjusted_mode->clock < 200000) 673 if (adjusted_mode->clock < 200000)
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
702static void 702static void
703intel_dp_dpms(struct drm_encoder *encoder, int mode) 703intel_dp_dpms(struct drm_encoder *encoder, int mode)
704{ 704{
705 struct intel_output *intel_output = enc_to_intel_output(encoder); 705 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
706 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 706 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
707 struct drm_device *dev = intel_output->base.dev; 707 struct drm_device *dev = intel_encoder->base.dev;
708 struct drm_i915_private *dev_priv = dev->dev_private; 708 struct drm_i915_private *dev_priv = dev->dev_private;
709 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 709 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
710 710
711 if (mode != DRM_MODE_DPMS_ON) { 711 if (mode != DRM_MODE_DPMS_ON) {
712 if (dp_reg & DP_PORT_EN) { 712 if (dp_reg & DP_PORT_EN) {
713 intel_dp_link_down(intel_output, dp_priv->DP); 713 intel_dp_link_down(intel_encoder, dp_priv->DP);
714 if (IS_eDP(intel_output)) 714 if (IS_eDP(intel_encoder))
715 ironlake_edp_backlight_off(dev); 715 ironlake_edp_backlight_off(dev);
716 } 716 }
717 } else { 717 } else {
718 if (!(dp_reg & DP_PORT_EN)) { 718 if (!(dp_reg & DP_PORT_EN)) {
719 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 719 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
720 if (IS_eDP(intel_output)) 720 if (IS_eDP(intel_encoder))
721 ironlake_edp_backlight_on(dev); 721 ironlake_edp_backlight_on(dev);
722 } 722 }
723 } 723 }
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
729 * link status information 729 * link status information
730 */ 730 */
731static bool 731static bool
732intel_dp_get_link_status(struct intel_output *intel_output, 732intel_dp_get_link_status(struct intel_encoder *intel_encoder,
733 uint8_t link_status[DP_LINK_STATUS_SIZE]) 733 uint8_t link_status[DP_LINK_STATUS_SIZE])
734{ 734{
735 int ret; 735 int ret;
736 736
737 ret = intel_dp_aux_native_read(intel_output, 737 ret = intel_dp_aux_native_read(intel_encoder,
738 DP_LANE0_1_STATUS, 738 DP_LANE0_1_STATUS,
739 link_status, DP_LINK_STATUS_SIZE); 739 link_status, DP_LINK_STATUS_SIZE);
740 if (ret != DP_LINK_STATUS_SIZE) 740 if (ret != DP_LINK_STATUS_SIZE)
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
752static void 752static void
753intel_dp_save(struct drm_connector *connector) 753intel_dp_save(struct drm_connector *connector)
754{ 754{
755 struct intel_output *intel_output = to_intel_output(connector); 755 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
756 struct drm_device *dev = intel_output->base.dev; 756 struct drm_device *dev = intel_encoder->base.dev;
757 struct drm_i915_private *dev_priv = dev->dev_private; 757 struct drm_i915_private *dev_priv = dev->dev_private;
758 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 758 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
759 759
760 dp_priv->save_DP = I915_READ(dp_priv->output_reg); 760 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
761 intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, 761 intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
762 dp_priv->save_link_configuration, 762 dp_priv->save_link_configuration,
763 sizeof (dp_priv->save_link_configuration)); 763 sizeof (dp_priv->save_link_configuration));
764} 764}
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
825} 825}
826 826
827static void 827static void
828intel_get_adjust_train(struct intel_output *intel_output, 828intel_get_adjust_train(struct intel_encoder *intel_encoder,
829 uint8_t link_status[DP_LINK_STATUS_SIZE], 829 uint8_t link_status[DP_LINK_STATUS_SIZE],
830 int lane_count, 830 int lane_count,
831 uint8_t train_set[4]) 831 uint8_t train_set[4])
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
942} 942}
943 943
944static bool 944static bool
945intel_dp_set_link_train(struct intel_output *intel_output, 945intel_dp_set_link_train(struct intel_encoder *intel_encoder,
946 uint32_t dp_reg_value, 946 uint32_t dp_reg_value,
947 uint8_t dp_train_pat, 947 uint8_t dp_train_pat,
948 uint8_t train_set[4], 948 uint8_t train_set[4],
949 bool first) 949 bool first)
950{ 950{
951 struct drm_device *dev = intel_output->base.dev; 951 struct drm_device *dev = intel_encoder->base.dev;
952 struct drm_i915_private *dev_priv = dev->dev_private; 952 struct drm_i915_private *dev_priv = dev->dev_private;
953 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 953 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
954 int ret; 954 int ret;
955 955
956 I915_WRITE(dp_priv->output_reg, dp_reg_value); 956 I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
958 if (first) 958 if (first)
959 intel_wait_for_vblank(dev); 959 intel_wait_for_vblank(dev);
960 960
961 intel_dp_aux_native_write_1(intel_output, 961 intel_dp_aux_native_write_1(intel_encoder,
962 DP_TRAINING_PATTERN_SET, 962 DP_TRAINING_PATTERN_SET,
963 dp_train_pat); 963 dp_train_pat);
964 964
965 ret = intel_dp_aux_native_write(intel_output, 965 ret = intel_dp_aux_native_write(intel_encoder,
966 DP_TRAINING_LANE0_SET, train_set, 4); 966 DP_TRAINING_LANE0_SET, train_set, 4);
967 if (ret != 4) 967 if (ret != 4)
968 return false; 968 return false;
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
971} 971}
972 972
973static void 973static void
974intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 974intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) 975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
976{ 976{
977 struct drm_device *dev = intel_output->base.dev; 977 struct drm_device *dev = intel_encoder->base.dev;
978 struct drm_i915_private *dev_priv = dev->dev_private; 978 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 979 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
980 uint8_t train_set[4]; 980 uint8_t train_set[4];
981 uint8_t link_status[DP_LINK_STATUS_SIZE]; 981 uint8_t link_status[DP_LINK_STATUS_SIZE];
982 int i; 982 int i;
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
987 int tries; 987 int tries;
988 988
989 /* Write the link configuration data */ 989 /* Write the link configuration data */
990 intel_dp_aux_native_write(intel_output, 0x100, 990 intel_dp_aux_native_write(intel_encoder, 0x100,
991 link_configuration, DP_LINK_CONFIGURATION_SIZE); 991 link_configuration, DP_LINK_CONFIGURATION_SIZE);
992 992
993 DP |= DP_PORT_EN; 993 DP |= DP_PORT_EN;
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1003 1003
1004 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, 1004 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
1005 DP_TRAINING_PATTERN_1, train_set, first)) 1005 DP_TRAINING_PATTERN_1, train_set, first))
1006 break; 1006 break;
1007 first = false; 1007 first = false;
1008 /* Set training pattern 1 */ 1008 /* Set training pattern 1 */
1009 1009
1010 udelay(100); 1010 udelay(100);
1011 if (!intel_dp_get_link_status(intel_output, link_status)) 1011 if (!intel_dp_get_link_status(intel_encoder, link_status))
1012 break; 1012 break;
1013 1013
1014 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { 1014 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1033 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1033 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1034 1034
1035 /* Compute new train_set as requested by target */ 1035 /* Compute new train_set as requested by target */
1036 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1036 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
1037 } 1037 }
1038 1038
1039 /* channel equalization */ 1039 /* channel equalization */
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1046 1046
1047 /* channel eq pattern */ 1047 /* channel eq pattern */
1048 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, 1048 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
1049 DP_TRAINING_PATTERN_2, train_set, 1049 DP_TRAINING_PATTERN_2, train_set,
1050 false)) 1050 false))
1051 break; 1051 break;
1052 1052
1053 udelay(400); 1053 udelay(400);
1054 if (!intel_dp_get_link_status(intel_output, link_status)) 1054 if (!intel_dp_get_link_status(intel_encoder, link_status))
1055 break; 1055 break;
1056 1056
1057 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { 1057 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1064 break; 1064 break;
1065 1065
1066 /* Compute new train_set as requested by target */ 1066 /* Compute new train_set as requested by target */
1067 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1067 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
1068 ++tries; 1068 ++tries;
1069 } 1069 }
1070 1070
1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); 1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
1072 POSTING_READ(dp_priv->output_reg); 1072 POSTING_READ(dp_priv->output_reg);
1073 intel_dp_aux_native_write_1(intel_output, 1073 intel_dp_aux_native_write_1(intel_encoder,
1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1075} 1075}
1076 1076
1077static void 1077static void
1078intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) 1078intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1079{ 1079{
1080 struct drm_device *dev = intel_output->base.dev; 1080 struct drm_device *dev = intel_encoder->base.dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private; 1081 struct drm_i915_private *dev_priv = dev->dev_private;
1082 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1082 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1083 1083
1084 DRM_DEBUG_KMS("\n"); 1084 DRM_DEBUG_KMS("\n");
1085 1085
1086 if (IS_eDP(intel_output)) { 1086 if (IS_eDP(intel_encoder)) {
1087 DP &= ~DP_PLL_ENABLE; 1087 DP &= ~DP_PLL_ENABLE;
1088 I915_WRITE(dp_priv->output_reg, DP); 1088 I915_WRITE(dp_priv->output_reg, DP);
1089 POSTING_READ(dp_priv->output_reg); 1089 POSTING_READ(dp_priv->output_reg);
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1096 1096
1097 udelay(17000); 1097 udelay(17000);
1098 1098
1099 if (IS_eDP(intel_output)) 1099 if (IS_eDP(intel_encoder))
1100 DP |= DP_LINK_TRAIN_OFF; 1100 DP |= DP_LINK_TRAIN_OFF;
1101 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); 1101 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
1102 POSTING_READ(dp_priv->output_reg); 1102 POSTING_READ(dp_priv->output_reg);
@@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1105static void 1105static void
1106intel_dp_restore(struct drm_connector *connector) 1106intel_dp_restore(struct drm_connector *connector)
1107{ 1107{
1108 struct intel_output *intel_output = to_intel_output(connector); 1108 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1109 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1109 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1110 1110
1111 if (dp_priv->save_DP & DP_PORT_EN) 1111 if (dp_priv->save_DP & DP_PORT_EN)
1112 intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); 1112 intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
1113 else 1113 else
1114 intel_dp_link_down(intel_output, dp_priv->save_DP); 1114 intel_dp_link_down(intel_encoder, dp_priv->save_DP);
1115} 1115}
1116 1116
1117/* 1117/*
@@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector)
1124 */ 1124 */
1125 1125
1126static void 1126static void
1127intel_dp_check_link_status(struct intel_output *intel_output) 1127intel_dp_check_link_status(struct intel_encoder *intel_encoder)
1128{ 1128{
1129 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1129 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1130 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1130 uint8_t link_status[DP_LINK_STATUS_SIZE];
1131 1131
1132 if (!intel_output->enc.crtc) 1132 if (!intel_encoder->enc.crtc)
1133 return; 1133 return;
1134 1134
1135 if (!intel_dp_get_link_status(intel_output, link_status)) { 1135 if (!intel_dp_get_link_status(intel_encoder, link_status)) {
1136 intel_dp_link_down(intel_output, dp_priv->DP); 1136 intel_dp_link_down(intel_encoder, dp_priv->DP);
1137 return; 1137 return;
1138 } 1138 }
1139 1139
1140 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) 1140 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
1141 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 1141 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
1142} 1142}
1143 1143
1144static enum drm_connector_status 1144static enum drm_connector_status
1145ironlake_dp_detect(struct drm_connector *connector) 1145ironlake_dp_detect(struct drm_connector *connector)
1146{ 1146{
1147 struct intel_output *intel_output = to_intel_output(connector); 1147 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1148 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1148 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1149 enum drm_connector_status status; 1149 enum drm_connector_status status;
1150 1150
1151 status = connector_status_disconnected; 1151 status = connector_status_disconnected;
1152 if (intel_dp_aux_native_read(intel_output, 1152 if (intel_dp_aux_native_read(intel_encoder,
1153 0x000, dp_priv->dpcd, 1153 0x000, dp_priv->dpcd,
1154 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1154 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1155 { 1155 {
@@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector)
1168static enum drm_connector_status 1168static enum drm_connector_status
1169intel_dp_detect(struct drm_connector *connector) 1169intel_dp_detect(struct drm_connector *connector)
1170{ 1170{
1171 struct intel_output *intel_output = to_intel_output(connector); 1171 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1172 struct drm_device *dev = intel_output->base.dev; 1172 struct drm_device *dev = intel_encoder->base.dev;
1173 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
1174 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1174 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1175 uint32_t temp, bit; 1175 uint32_t temp, bit;
1176 enum drm_connector_status status; 1176 enum drm_connector_status status;
1177 1177
@@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector)
1210 return connector_status_disconnected; 1210 return connector_status_disconnected;
1211 1211
1212 status = connector_status_disconnected; 1212 status = connector_status_disconnected;
1213 if (intel_dp_aux_native_read(intel_output, 1213 if (intel_dp_aux_native_read(intel_encoder,
1214 0x000, dp_priv->dpcd, 1214 0x000, dp_priv->dpcd,
1215 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1215 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1216 { 1216 {
@@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector)
1222 1222
1223static int intel_dp_get_modes(struct drm_connector *connector) 1223static int intel_dp_get_modes(struct drm_connector *connector)
1224{ 1224{
1225 struct intel_output *intel_output = to_intel_output(connector); 1225 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1226 struct drm_device *dev = intel_output->base.dev; 1226 struct drm_device *dev = intel_encoder->base.dev;
1227 struct drm_i915_private *dev_priv = dev->dev_private; 1227 struct drm_i915_private *dev_priv = dev->dev_private;
1228 int ret; 1228 int ret;
1229 1229
1230 /* We should parse the EDID data and find out if it has an audio sink 1230 /* We should parse the EDID data and find out if it has an audio sink
1231 */ 1231 */
1232 1232
1233 ret = intel_ddc_get_modes(intel_output); 1233 ret = intel_ddc_get_modes(intel_encoder);
1234 if (ret) 1234 if (ret)
1235 return ret; 1235 return ret;
1236 1236
1237 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1237 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1238 if (IS_eDP(intel_output)) { 1238 if (IS_eDP(intel_encoder)) {
1239 if (dev_priv->panel_fixed_mode != NULL) { 1239 if (dev_priv->panel_fixed_mode != NULL) {
1240 struct drm_display_mode *mode; 1240 struct drm_display_mode *mode;
1241 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1241 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1249static void 1249static void
1250intel_dp_destroy (struct drm_connector *connector) 1250intel_dp_destroy (struct drm_connector *connector)
1251{ 1251{
1252 struct intel_output *intel_output = to_intel_output(connector); 1252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1253 1253
1254 if (intel_output->i2c_bus) 1254 if (intel_encoder->i2c_bus)
1255 intel_i2c_destroy(intel_output->i2c_bus); 1255 intel_i2c_destroy(intel_encoder->i2c_bus);
1256 drm_sysfs_connector_remove(connector); 1256 drm_sysfs_connector_remove(connector);
1257 drm_connector_cleanup(connector); 1257 drm_connector_cleanup(connector);
1258 kfree(intel_output); 1258 kfree(intel_encoder);
1259} 1259}
1260 1260
1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1291}; 1291};
1292 1292
1293void 1293void
1294intel_dp_hot_plug(struct intel_output *intel_output) 1294intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1295{ 1295{
1296 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1296 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1297 1297
1298 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1298 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1299 intel_dp_check_link_status(intel_output); 1299 intel_dp_check_link_status(intel_encoder);
1300} 1300}
1301 1301
1302void 1302void
@@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1304{ 1304{
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1305 struct drm_i915_private *dev_priv = dev->dev_private;
1306 struct drm_connector *connector; 1306 struct drm_connector *connector;
1307 struct intel_output *intel_output; 1307 struct intel_encoder *intel_encoder;
1308 struct intel_dp_priv *dp_priv; 1308 struct intel_dp_priv *dp_priv;
1309 const char *name = NULL; 1309 const char *name = NULL;
1310 1310
1311 intel_output = kcalloc(sizeof(struct intel_output) + 1311 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
1312 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1312 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1313 if (!intel_output) 1313 if (!intel_encoder)
1314 return; 1314 return;
1315 1315
1316 dp_priv = (struct intel_dp_priv *)(intel_output + 1); 1316 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1317 1317
1318 connector = &intel_output->base; 1318 connector = &intel_encoder->base;
1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1320 DRM_MODE_CONNECTOR_DisplayPort); 1320 DRM_MODE_CONNECTOR_DisplayPort);
1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1322 1322
1323 if (output_reg == DP_A) 1323 if (output_reg == DP_A)
1324 intel_output->type = INTEL_OUTPUT_EDP; 1324 intel_encoder->type = INTEL_OUTPUT_EDP;
1325 else 1325 else
1326 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1326 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1327 1327
1328 if (output_reg == DP_B || output_reg == PCH_DP_B) 1328 if (output_reg == DP_B || output_reg == PCH_DP_B)
1329 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1329 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1330 else if (output_reg == DP_C || output_reg == PCH_DP_C) 1330 else if (output_reg == DP_C || output_reg == PCH_DP_C)
1331 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 1331 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1332 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1332 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1333 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1333 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1334 1334
1335 if (IS_eDP(intel_output)) 1335 if (IS_eDP(intel_encoder))
1336 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1336 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1337 1337
1338 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1338 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1339 connector->interlace_allowed = true; 1339 connector->interlace_allowed = true;
1340 connector->doublescan_allowed = 0; 1340 connector->doublescan_allowed = 0;
1341 1341
1342 dp_priv->intel_output = intel_output; 1342 dp_priv->intel_encoder = intel_encoder;
1343 dp_priv->output_reg = output_reg; 1343 dp_priv->output_reg = output_reg;
1344 dp_priv->has_audio = false; 1344 dp_priv->has_audio = false;
1345 dp_priv->dpms_mode = DRM_MODE_DPMS_ON; 1345 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1346 intel_output->dev_priv = dp_priv; 1346 intel_encoder->dev_priv = dp_priv;
1347 1347
1348 drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, 1348 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
1349 DRM_MODE_ENCODER_TMDS); 1349 DRM_MODE_ENCODER_TMDS);
1350 drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); 1350 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1351 1351
1352 drm_mode_connector_attach_encoder(&intel_output->base, 1352 drm_mode_connector_attach_encoder(&intel_encoder->base,
1353 &intel_output->enc); 1353 &intel_encoder->enc);
1354 drm_sysfs_connector_add(connector); 1354 drm_sysfs_connector_add(connector);
1355 1355
1356 /* Set up the DDC bus. */ 1356 /* Set up the DDC bus. */
@@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1378 break; 1378 break;
1379 } 1379 }
1380 1380
1381 intel_dp_i2c_init(intel_output, name); 1381 intel_dp_i2c_init(intel_encoder, name);
1382 1382
1383 intel_output->ddc_bus = &dp_priv->adapter; 1383 intel_encoder->ddc_bus = &dp_priv->adapter;
1384 intel_output->hot_plug = intel_dp_hot_plug; 1384 intel_encoder->hot_plug = intel_dp_hot_plug;
1385 1385
1386 if (output_reg == DP_A) { 1386 if (output_reg == DP_A) {
1387 /* initialize panel mode from VBT if available for eDP */ 1387 /* initialize panel mode from VBT if available for eDP */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3a467ca57857..e30253755f12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,7 +95,7 @@ struct intel_framebuffer {
95}; 95};
96 96
97 97
98struct intel_output { 98struct intel_encoder {
99 struct drm_connector base; 99 struct drm_connector base;
100 100
101 struct drm_encoder enc; 101 struct drm_encoder enc;
@@ -105,7 +105,7 @@ struct intel_output {
105 bool load_detect_temp; 105 bool load_detect_temp;
106 bool needs_tv_clock; 106 bool needs_tv_clock;
107 void *dev_priv; 107 void *dev_priv;
108 void (*hot_plug)(struct intel_output *); 108 void (*hot_plug)(struct intel_encoder *);
109 int crtc_mask; 109 int crtc_mask;
110 int clone_mask; 110 int clone_mask;
111}; 111};
@@ -152,15 +152,15 @@ struct intel_crtc {
152}; 152};
153 153
154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
155#define to_intel_output(x) container_of(x, struct intel_output, base) 155#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
156#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) 156#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
158 158
159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
160 const char *name); 160 const char *name);
161void intel_i2c_destroy(struct i2c_adapter *adapter); 161void intel_i2c_destroy(struct i2c_adapter *adapter);
162int intel_ddc_get_modes(struct intel_output *intel_output); 162int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
163extern bool intel_ddc_probe(struct intel_output *intel_output); 163extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
164void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 164void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev); 165void intel_i2c_reset_gmbus(struct drm_device *dev);
166 166
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
175void 175void
176intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 176intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
177 struct drm_display_mode *adjusted_mode); 177 struct drm_display_mode *adjusted_mode);
178extern void intel_edp_link_config (struct intel_output *, int *, int *); 178extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
179 179
180 180
181extern int intel_panel_fitter_pipe (struct drm_device *dev); 181extern int intel_panel_fitter_pipe (struct drm_device *dev);
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
191 struct drm_file *file_priv); 191 struct drm_file *file_priv);
192extern void intel_wait_for_vblank(struct drm_device *dev); 192extern void intel_wait_for_vblank(struct drm_device *dev);
193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
195 struct drm_display_mode *mode, 195 struct drm_display_mode *mode,
196 int *dpms_mode); 196 int *dpms_mode);
197extern void intel_release_load_detect_pipe(struct intel_output *intel_output, 197extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
198 int dpms_mode); 198 int dpms_mode);
199 199
200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 0427ca5a2514..ebf213c96b9c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
80static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 80static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
81{ 81{
82 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 82 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
83 struct intel_output *intel_output = enc_to_intel_output(encoder); 83 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
84 struct intel_dvo_device *dvo = intel_output->dev_priv; 84 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
85 u32 dvo_reg = dvo->dvo_reg; 85 u32 dvo_reg = dvo->dvo_reg;
86 u32 temp = I915_READ(dvo_reg); 86 u32 temp = I915_READ(dvo_reg);
87 87
@@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
99static void intel_dvo_save(struct drm_connector *connector) 99static void intel_dvo_save(struct drm_connector *connector)
100{ 100{
101 struct drm_i915_private *dev_priv = connector->dev->dev_private; 101 struct drm_i915_private *dev_priv = connector->dev->dev_private;
102 struct intel_output *intel_output = to_intel_output(connector); 102 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
103 struct intel_dvo_device *dvo = intel_output->dev_priv; 103 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
104 104
105 /* Each output should probably just save the registers it touches, 105 /* Each output should probably just save the registers it touches,
106 * but for now, use more overkill. 106 * but for now, use more overkill.
@@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector)
115static void intel_dvo_restore(struct drm_connector *connector) 115static void intel_dvo_restore(struct drm_connector *connector)
116{ 116{
117 struct drm_i915_private *dev_priv = connector->dev->dev_private; 117 struct drm_i915_private *dev_priv = connector->dev->dev_private;
118 struct intel_output *intel_output = to_intel_output(connector); 118 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
119 struct intel_dvo_device *dvo = intel_output->dev_priv; 119 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
120 120
121 dvo->dev_ops->restore(dvo); 121 dvo->dev_ops->restore(dvo);
122 122
@@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector)
128static int intel_dvo_mode_valid(struct drm_connector *connector, 128static int intel_dvo_mode_valid(struct drm_connector *connector,
129 struct drm_display_mode *mode) 129 struct drm_display_mode *mode)
130{ 130{
131 struct intel_output *intel_output = to_intel_output(connector); 131 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
132 struct intel_dvo_device *dvo = intel_output->dev_priv; 132 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
133 133
134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
135 return MODE_NO_DBLESCAN; 135 return MODE_NO_DBLESCAN;
@@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
150 struct drm_display_mode *mode, 150 struct drm_display_mode *mode,
151 struct drm_display_mode *adjusted_mode) 151 struct drm_display_mode *adjusted_mode)
152{ 152{
153 struct intel_output *intel_output = enc_to_intel_output(encoder); 153 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
154 struct intel_dvo_device *dvo = intel_output->dev_priv; 154 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
155 155
156 /* If we have timings from the BIOS for the panel, put them in 156 /* If we have timings from the BIOS for the panel, put them in
157 * to the adjusted mode. The CRTC will be set up for this mode, 157 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
186 struct drm_device *dev = encoder->dev; 186 struct drm_device *dev = encoder->dev;
187 struct drm_i915_private *dev_priv = dev->dev_private; 187 struct drm_i915_private *dev_priv = dev->dev_private;
188 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 188 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
189 struct intel_output *intel_output = enc_to_intel_output(encoder); 189 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
190 struct intel_dvo_device *dvo = intel_output->dev_priv; 190 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
191 int pipe = intel_crtc->pipe; 191 int pipe = intel_crtc->pipe;
192 u32 dvo_val; 192 u32 dvo_val;
193 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; 193 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
241 */ 241 */
242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
243{ 243{
244 struct intel_output *intel_output = to_intel_output(connector); 244 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
245 struct intel_dvo_device *dvo = intel_output->dev_priv; 245 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
246 246
247 return dvo->dev_ops->detect(dvo); 247 return dvo->dev_ops->detect(dvo);
248} 248}
249 249
250static int intel_dvo_get_modes(struct drm_connector *connector) 250static int intel_dvo_get_modes(struct drm_connector *connector)
251{ 251{
252 struct intel_output *intel_output = to_intel_output(connector); 252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
253 struct intel_dvo_device *dvo = intel_output->dev_priv; 253 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
254 254
255 /* We should probably have an i2c driver get_modes function for those 255 /* We should probably have an i2c driver get_modes function for those
256 * devices which will have a fixed set of modes determined by the chip 256 * devices which will have a fixed set of modes determined by the chip
257 * (TV-out, for example), but for now with just TMDS and LVDS, 257 * (TV-out, for example), but for now with just TMDS and LVDS,
258 * that's not the case. 258 * that's not the case.
259 */ 259 */
260 intel_ddc_get_modes(intel_output); 260 intel_ddc_get_modes(intel_encoder);
261 if (!list_empty(&connector->probed_modes)) 261 if (!list_empty(&connector->probed_modes))
262 return 1; 262 return 1;
263 263
@@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
275 275
276static void intel_dvo_destroy (struct drm_connector *connector) 276static void intel_dvo_destroy (struct drm_connector *connector)
277{ 277{
278 struct intel_output *intel_output = to_intel_output(connector); 278 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
279 struct intel_dvo_device *dvo = intel_output->dev_priv; 279 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
280 280
281 if (dvo) { 281 if (dvo) {
282 if (dvo->dev_ops->destroy) 282 if (dvo->dev_ops->destroy)
@@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector)
286 /* no need, in i830_dvoices[] now */ 286 /* no need, in i830_dvoices[] now */
287 //kfree(dvo); 287 //kfree(dvo);
288 } 288 }
289 if (intel_output->i2c_bus) 289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_output->i2c_bus); 290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_output->ddc_bus) 291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_output->ddc_bus); 292 intel_i2c_destroy(intel_encoder->ddc_bus);
293 drm_sysfs_connector_remove(connector); 293 drm_sysfs_connector_remove(connector);
294 drm_connector_cleanup(connector); 294 drm_connector_cleanup(connector);
295 kfree(intel_output); 295 kfree(intel_encoder);
296} 296}
297 297
298#ifdef RANDR_GET_CRTC_INTERFACE 298#ifdef RANDR_GET_CRTC_INTERFACE
@@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
300{ 300{
301 struct drm_device *dev = connector->dev; 301 struct drm_device *dev = connector->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private; 302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_output *intel_output = to_intel_output(connector); 303 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
304 struct intel_dvo_device *dvo = intel_output->dev_priv; 304 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); 305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
306 306
307 return intel_pipe_to_crtc(pScrn, pipe); 307 return intel_pipe_to_crtc(pScrn, pipe);
@@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
352{ 352{
353 struct drm_device *dev = connector->dev; 353 struct drm_device *dev = connector->dev;
354 struct drm_i915_private *dev_priv = dev->dev_private; 354 struct drm_i915_private *dev_priv = dev->dev_private;
355 struct intel_output *intel_output = to_intel_output(connector); 355 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
356 struct intel_dvo_device *dvo = intel_output->dev_priv; 356 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
357 uint32_t dvo_reg = dvo->dvo_reg; 357 uint32_t dvo_reg = dvo->dvo_reg;
358 uint32_t dvo_val = I915_READ(dvo_reg); 358 uint32_t dvo_val = I915_READ(dvo_reg);
359 struct drm_display_mode *mode = NULL; 359 struct drm_display_mode *mode = NULL;
@@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
383 383
384void intel_dvo_init(struct drm_device *dev) 384void intel_dvo_init(struct drm_device *dev)
385{ 385{
386 struct intel_output *intel_output; 386 struct intel_encoder *intel_encoder;
387 struct intel_dvo_device *dvo; 387 struct intel_dvo_device *dvo;
388 struct i2c_adapter *i2cbus = NULL; 388 struct i2c_adapter *i2cbus = NULL;
389 int ret = 0; 389 int ret = 0;
390 int i; 390 int i;
391 int encoder_type = DRM_MODE_ENCODER_NONE; 391 int encoder_type = DRM_MODE_ENCODER_NONE;
392 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); 392 intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
393 if (!intel_output) 393 if (!intel_encoder)
394 return; 394 return;
395 395
396 /* Set up the DDC bus */ 396 /* Set up the DDC bus */
397 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 397 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
398 if (!intel_output->ddc_bus) 398 if (!intel_encoder->ddc_bus)
399 goto free_intel; 399 goto free_intel;
400 400
401 /* Now, try to find a controller */ 401 /* Now, try to find a controller */
402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
403 struct drm_connector *connector = &intel_output->base; 403 struct drm_connector *connector = &intel_encoder->base;
404 int gpio; 404 int gpio;
405 405
406 dvo = &intel_dvo_devices[i]; 406 dvo = &intel_dvo_devices[i];
@@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev)
435 if (!ret) 435 if (!ret)
436 continue; 436 continue;
437 437
438 intel_output->type = INTEL_OUTPUT_DVO; 438 intel_encoder->type = INTEL_OUTPUT_DVO;
439 intel_output->crtc_mask = (1 << 0) | (1 << 1); 439 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
440 switch (dvo->type) { 440 switch (dvo->type) {
441 case INTEL_DVO_CHIP_TMDS: 441 case INTEL_DVO_CHIP_TMDS:
442 intel_output->clone_mask = 442 intel_encoder->clone_mask =
443 (1 << INTEL_DVO_TMDS_CLONE_BIT) | 443 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
444 (1 << INTEL_ANALOG_CLONE_BIT); 444 (1 << INTEL_ANALOG_CLONE_BIT);
445 drm_connector_init(dev, connector, 445 drm_connector_init(dev, connector,
@@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
448 encoder_type = DRM_MODE_ENCODER_TMDS; 448 encoder_type = DRM_MODE_ENCODER_TMDS;
449 break; 449 break;
450 case INTEL_DVO_CHIP_LVDS: 450 case INTEL_DVO_CHIP_LVDS:
451 intel_output->clone_mask = 451 intel_encoder->clone_mask =
452 (1 << INTEL_DVO_LVDS_CLONE_BIT); 452 (1 << INTEL_DVO_LVDS_CLONE_BIT);
453 drm_connector_init(dev, connector, 453 drm_connector_init(dev, connector,
454 &intel_dvo_connector_funcs, 454 &intel_dvo_connector_funcs,
@@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev)
463 connector->interlace_allowed = false; 463 connector->interlace_allowed = false;
464 connector->doublescan_allowed = false; 464 connector->doublescan_allowed = false;
465 465
466 intel_output->dev_priv = dvo; 466 intel_encoder->dev_priv = dvo;
467 intel_output->i2c_bus = i2cbus; 467 intel_encoder->i2c_bus = i2cbus;
468 468
469 drm_encoder_init(dev, &intel_output->enc, 469 drm_encoder_init(dev, &intel_encoder->enc,
470 &intel_dvo_enc_funcs, encoder_type); 470 &intel_dvo_enc_funcs, encoder_type);
471 drm_encoder_helper_add(&intel_output->enc, 471 drm_encoder_helper_add(&intel_encoder->enc,
472 &intel_dvo_helper_funcs); 472 &intel_dvo_helper_funcs);
473 473
474 drm_mode_connector_attach_encoder(&intel_output->base, 474 drm_mode_connector_attach_encoder(&intel_encoder->base,
475 &intel_output->enc); 475 &intel_encoder->enc);
476 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 476 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
477 /* For our LVDS chipsets, we should hopefully be able 477 /* For our LVDS chipsets, we should hopefully be able
478 * to dig the fixed panel mode out of the BIOS data. 478 * to dig the fixed panel mode out of the BIOS data.
@@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev)
490 return; 490 return;
491 } 491 }
492 492
493 intel_i2c_destroy(intel_output->ddc_bus); 493 intel_i2c_destroy(intel_encoder->ddc_bus);
494 /* Didn't find a chip, so tear down. */ 494 /* Didn't find a chip, so tear down. */
495 if (i2cbus != NULL) 495 if (i2cbus != NULL)
496 intel_i2c_destroy(i2cbus); 496 intel_i2c_destroy(i2cbus);
497free_intel: 497free_intel:
498 kfree(intel_output); 498 kfree(intel_encoder);
499} 499}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 69bbef92f130..8a0b3bcdc7b1 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
144 ret = -ENOMEM; 144 ret = -ENOMEM;
145 goto out; 145 goto out;
146 } 146 }
147 obj_priv = fbo->driver_private; 147 obj_priv = to_intel_bo(fbo);
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ed02f641258..48cade0cf7b1 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
51 struct drm_i915_private *dev_priv = dev->dev_private; 51 struct drm_i915_private *dev_priv = dev->dev_private;
52 struct drm_crtc *crtc = encoder->crtc; 52 struct drm_crtc *crtc = encoder->crtc;
53 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 53 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
54 struct intel_output *intel_output = enc_to_intel_output(encoder); 54 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
55 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 55 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
56 u32 sdvox; 56 u32 sdvox;
57 57
58 sdvox = SDVO_ENCODING_HDMI | 58 sdvox = SDVO_ENCODING_HDMI |
@@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
74{ 74{
75 struct drm_device *dev = encoder->dev; 75 struct drm_device *dev = encoder->dev;
76 struct drm_i915_private *dev_priv = dev->dev_private; 76 struct drm_i915_private *dev_priv = dev->dev_private;
77 struct intel_output *intel_output = enc_to_intel_output(encoder); 77 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
78 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 78 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
79 u32 temp; 79 u32 temp;
80 80
81 temp = I915_READ(hdmi_priv->sdvox_reg); 81 temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector)
110{ 110{
111 struct drm_device *dev = connector->dev; 111 struct drm_device *dev = connector->dev;
112 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_output *intel_output = to_intel_output(connector); 113 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
114 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 114 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
115 115
116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); 116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
117} 117}
@@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector)
120{ 120{
121 struct drm_device *dev = connector->dev; 121 struct drm_device *dev = connector->dev;
122 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct intel_output *intel_output = to_intel_output(connector); 123 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
124 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 124 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
125 125
126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); 126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
127 POSTING_READ(hdmi_priv->sdvox_reg); 127 POSTING_READ(hdmi_priv->sdvox_reg);
@@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
151static enum drm_connector_status 151static enum drm_connector_status
152intel_hdmi_detect(struct drm_connector *connector) 152intel_hdmi_detect(struct drm_connector *connector)
153{ 153{
154 struct intel_output *intel_output = to_intel_output(connector); 154 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
155 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 155 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
156 struct edid *edid = NULL; 156 struct edid *edid = NULL;
157 enum drm_connector_status status = connector_status_disconnected; 157 enum drm_connector_status status = connector_status_disconnected;
158 158
159 hdmi_priv->has_hdmi_sink = false; 159 hdmi_priv->has_hdmi_sink = false;
160 edid = drm_get_edid(&intel_output->base, 160 edid = drm_get_edid(&intel_encoder->base,
161 intel_output->ddc_bus); 161 intel_encoder->ddc_bus);
162 162
163 if (edid) { 163 if (edid) {
164 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 164 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
165 status = connector_status_connected; 165 status = connector_status_connected;
166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
167 } 167 }
168 intel_output->base.display_info.raw_edid = NULL; 168 intel_encoder->base.display_info.raw_edid = NULL;
169 kfree(edid); 169 kfree(edid);
170 } 170 }
171 171
@@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector)
174 174
175static int intel_hdmi_get_modes(struct drm_connector *connector) 175static int intel_hdmi_get_modes(struct drm_connector *connector)
176{ 176{
177 struct intel_output *intel_output = to_intel_output(connector); 177 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
178 178
179 /* We should parse the EDID data and find out if it's an HDMI sink so 179 /* We should parse the EDID data and find out if it's an HDMI sink so
180 * we can send audio to it. 180 * we can send audio to it.
181 */ 181 */
182 182
183 return intel_ddc_get_modes(intel_output); 183 return intel_ddc_get_modes(intel_encoder);
184} 184}
185 185
186static void intel_hdmi_destroy(struct drm_connector *connector) 186static void intel_hdmi_destroy(struct drm_connector *connector)
187{ 187{
188 struct intel_output *intel_output = to_intel_output(connector); 188 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
189 189
190 if (intel_output->i2c_bus) 190 if (intel_encoder->i2c_bus)
191 intel_i2c_destroy(intel_output->i2c_bus); 191 intel_i2c_destroy(intel_encoder->i2c_bus);
192 drm_sysfs_connector_remove(connector); 192 drm_sysfs_connector_remove(connector);
193 drm_connector_cleanup(connector); 193 drm_connector_cleanup(connector);
194 kfree(intel_output); 194 kfree(intel_encoder);
195} 195}
196 196
197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
230{ 230{
231 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct drm_i915_private *dev_priv = dev->dev_private;
232 struct drm_connector *connector; 232 struct drm_connector *connector;
233 struct intel_output *intel_output; 233 struct intel_encoder *intel_encoder;
234 struct intel_hdmi_priv *hdmi_priv; 234 struct intel_hdmi_priv *hdmi_priv;
235 235
236 intel_output = kcalloc(sizeof(struct intel_output) + 236 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_output) 238 if (!intel_encoder)
239 return; 239 return;
240 hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); 240 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
241 241
242 connector = &intel_output->base; 242 connector = &intel_encoder->base;
243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
244 DRM_MODE_CONNECTOR_HDMIA); 244 DRM_MODE_CONNECTOR_HDMIA);
245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
246 246
247 intel_output->type = INTEL_OUTPUT_HDMI; 247 intel_encoder->type = INTEL_OUTPUT_HDMI;
248 248
249 connector->interlace_allowed = 0; 249 connector->interlace_allowed = 0;
250 connector->doublescan_allowed = 0; 250 connector->doublescan_allowed = 0;
251 intel_output->crtc_mask = (1 << 0) | (1 << 1); 251 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
252 252
253 /* Set up the DDC bus. */ 253 /* Set up the DDC bus. */
254 if (sdvox_reg == SDVOB) { 254 if (sdvox_reg == SDVOB) {
255 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 255 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
256 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 256 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
257 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 257 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
258 } else if (sdvox_reg == SDVOC) { 258 } else if (sdvox_reg == SDVOC) {
259 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 259 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
260 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 260 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
261 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 261 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
262 } else if (sdvox_reg == HDMIB) { 262 } else if (sdvox_reg == HDMIB) {
263 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 263 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
264 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 264 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
265 "HDMIB"); 265 "HDMIB");
266 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 266 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
267 } else if (sdvox_reg == HDMIC) { 267 } else if (sdvox_reg == HDMIC) {
268 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 268 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
269 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 269 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
270 "HDMIC"); 270 "HDMIC");
271 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 271 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
272 } else if (sdvox_reg == HDMID) { 272 } else if (sdvox_reg == HDMID) {
273 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 273 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
274 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 274 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
275 "HDMID"); 275 "HDMID");
276 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 276 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
277 } 277 }
278 if (!intel_output->ddc_bus) 278 if (!intel_encoder->ddc_bus)
279 goto err_connector; 279 goto err_connector;
280 280
281 hdmi_priv->sdvox_reg = sdvox_reg; 281 hdmi_priv->sdvox_reg = sdvox_reg;
282 intel_output->dev_priv = hdmi_priv; 282 intel_encoder->dev_priv = hdmi_priv;
283 283
284 drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, 284 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
285 DRM_MODE_ENCODER_TMDS); 285 DRM_MODE_ENCODER_TMDS);
286 drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); 286 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
287 287
288 drm_mode_connector_attach_encoder(&intel_output->base, 288 drm_mode_connector_attach_encoder(&intel_encoder->base,
289 &intel_output->enc); 289 &intel_encoder->enc);
290 drm_sysfs_connector_add(connector); 290 drm_sysfs_connector_add(connector);
291 291
292 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 292 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
302 302
303err_connector: 303err_connector:
304 drm_connector_cleanup(connector); 304 drm_connector_cleanup(connector);
305 kfree(intel_output); 305 kfree(intel_encoder);
306 306
307 return; 307 return;
308} 308}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 216e9f52b6e0..b66806a37d37 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
239 struct drm_i915_private *dev_priv = dev->dev_private; 239 struct drm_i915_private *dev_priv = dev->dev_private;
240 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 240 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
241 struct drm_encoder *tmp_encoder; 241 struct drm_encoder *tmp_encoder;
242 struct intel_output *intel_output = enc_to_intel_output(encoder); 242 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
243 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 243 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
244 u32 pfit_control = 0, pfit_pgm_ratios = 0; 244 u32 pfit_control = 0, pfit_pgm_ratios = 0;
245 int left_border = 0, right_border = 0, top_border = 0; 245 int left_border = 0, right_border = 0, top_border = 0;
246 int bottom_border = 0; 246 int bottom_border = 0;
@@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587{ 587{
588 struct drm_device *dev = encoder->dev; 588 struct drm_device *dev = encoder->dev;
589 struct drm_i915_private *dev_priv = dev->dev_private; 589 struct drm_i915_private *dev_priv = dev->dev_private;
590 struct intel_output *intel_output = enc_to_intel_output(encoder); 590 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
591 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 591 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
592 592
593 /* 593 /*
594 * The LVDS pin pair will already have been turned on in the 594 * The LVDS pin pair will already have been turned on in the
@@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
635static int intel_lvds_get_modes(struct drm_connector *connector) 635static int intel_lvds_get_modes(struct drm_connector *connector)
636{ 636{
637 struct drm_device *dev = connector->dev; 637 struct drm_device *dev = connector->dev;
638 struct intel_output *intel_output = to_intel_output(connector); 638 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
639 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
640 int ret = 0; 640 int ret = 0;
641 641
642 ret = intel_ddc_get_modes(intel_output); 642 if (dev_priv->lvds_edid_good) {
643 ret = intel_ddc_get_modes(intel_encoder);
643 644
644 if (ret) 645 if (ret)
645 return ret; 646 return ret;
647 }
646 648
647 /* Didn't get an EDID, so 649 /* Didn't get an EDID, so
648 * Set wide sync ranges so we get all modes 650 * Set wide sync ranges so we get all modes
@@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
715static void intel_lvds_destroy(struct drm_connector *connector) 717static void intel_lvds_destroy(struct drm_connector *connector)
716{ 718{
717 struct drm_device *dev = connector->dev; 719 struct drm_device *dev = connector->dev;
718 struct intel_output *intel_output = to_intel_output(connector); 720 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
719 struct drm_i915_private *dev_priv = dev->dev_private; 721 struct drm_i915_private *dev_priv = dev->dev_private;
720 722
721 if (intel_output->ddc_bus) 723 if (intel_encoder->ddc_bus)
722 intel_i2c_destroy(intel_output->ddc_bus); 724 intel_i2c_destroy(intel_encoder->ddc_bus);
723 if (dev_priv->lid_notifier.notifier_call) 725 if (dev_priv->lid_notifier.notifier_call)
724 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 726 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
725 drm_sysfs_connector_remove(connector); 727 drm_sysfs_connector_remove(connector);
@@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector,
732 uint64_t value) 734 uint64_t value)
733{ 735{
734 struct drm_device *dev = connector->dev; 736 struct drm_device *dev = connector->dev;
735 struct intel_output *intel_output = 737 struct intel_encoder *intel_encoder =
736 to_intel_output(connector); 738 to_intel_encoder(connector);
737 739
738 if (property == dev->mode_config.scaling_mode_property && 740 if (property == dev->mode_config.scaling_mode_property &&
739 connector->encoder) { 741 connector->encoder) {
740 struct drm_crtc *crtc = connector->encoder->crtc; 742 struct drm_crtc *crtc = connector->encoder->crtc;
741 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 743 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
742 if (value == DRM_MODE_SCALE_NONE) { 744 if (value == DRM_MODE_SCALE_NONE) {
743 DRM_DEBUG_KMS("no scaling not supported\n"); 745 DRM_DEBUG_KMS("no scaling not supported\n");
744 return 0; 746 return 0;
@@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
858 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 860 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
859 }, 861 },
860 }, 862 },
863 {
864 .callback = intel_no_lvds_dmi_callback,
865 .ident = "Clientron U800",
866 .matches = {
867 DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
868 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
869 },
870 },
861 871
862 { } /* terminating entry */ 872 { } /* terminating entry */
863}; 873};
@@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
968void intel_lvds_init(struct drm_device *dev) 978void intel_lvds_init(struct drm_device *dev)
969{ 979{
970 struct drm_i915_private *dev_priv = dev->dev_private; 980 struct drm_i915_private *dev_priv = dev->dev_private;
971 struct intel_output *intel_output; 981 struct intel_encoder *intel_encoder;
972 struct drm_connector *connector; 982 struct drm_connector *connector;
973 struct drm_encoder *encoder; 983 struct drm_encoder *encoder;
974 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 984 struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev)
996 gpio = PCH_GPIOC; 1006 gpio = PCH_GPIOC;
997 } 1007 }
998 1008
999 intel_output = kzalloc(sizeof(struct intel_output) + 1009 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
1000 sizeof(struct intel_lvds_priv), GFP_KERNEL); 1010 sizeof(struct intel_lvds_priv), GFP_KERNEL);
1001 if (!intel_output) { 1011 if (!intel_encoder) {
1002 return; 1012 return;
1003 } 1013 }
1004 1014
1005 connector = &intel_output->base; 1015 connector = &intel_encoder->base;
1006 encoder = &intel_output->enc; 1016 encoder = &intel_encoder->enc;
1007 drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, 1017 drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
1008 DRM_MODE_CONNECTOR_LVDS); 1018 DRM_MODE_CONNECTOR_LVDS);
1009 1019
1010 drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, 1020 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
1011 DRM_MODE_ENCODER_LVDS); 1021 DRM_MODE_ENCODER_LVDS);
1012 1022
1013 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1023 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1014 intel_output->type = INTEL_OUTPUT_LVDS; 1024 intel_encoder->type = INTEL_OUTPUT_LVDS;
1015 1025
1016 intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 1026 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
1017 intel_output->crtc_mask = (1 << 1); 1027 intel_encoder->crtc_mask = (1 << 1);
1018 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 1028 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
1019 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 1029 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
1020 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1030 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1021 connector->interlace_allowed = false; 1031 connector->interlace_allowed = false;
1022 connector->doublescan_allowed = false; 1032 connector->doublescan_allowed = false;
1023 1033
1024 lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); 1034 lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
1025 intel_output->dev_priv = lvds_priv; 1035 intel_encoder->dev_priv = lvds_priv;
1026 /* create the scaling mode property */ 1036 /* create the scaling mode property */
1027 drm_mode_create_scaling_mode_property(dev); 1037 drm_mode_create_scaling_mode_property(dev);
1028 /* 1038 /*
1029 * the initial panel fitting mode will be FULL_SCREEN. 1039 * the initial panel fitting mode will be FULL_SCREEN.
1030 */ 1040 */
1031 1041
1032 drm_connector_attach_property(&intel_output->base, 1042 drm_connector_attach_property(&intel_encoder->base,
1033 dev->mode_config.scaling_mode_property, 1043 dev->mode_config.scaling_mode_property,
1034 DRM_MODE_SCALE_FULLSCREEN); 1044 DRM_MODE_SCALE_FULLSCREEN);
1035 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 1045 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev)
1044 */ 1054 */
1045 1055
1046 /* Set up the DDC bus. */ 1056 /* Set up the DDC bus. */
1047 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); 1057 intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
1048 if (!intel_output->ddc_bus) { 1058 if (!intel_encoder->ddc_bus) {
1049 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 1059 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
1050 "failed.\n"); 1060 "failed.\n");
1051 goto failed; 1061 goto failed;
@@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev)
1055 * Attempt to get the fixed panel mode from DDC. Assume that the 1065 * Attempt to get the fixed panel mode from DDC. Assume that the
1056 * preferred mode is the right one. 1066 * preferred mode is the right one.
1057 */ 1067 */
1058 intel_ddc_get_modes(intel_output); 1068 dev_priv->lvds_edid_good = true;
1069
1070 if (!intel_ddc_get_modes(intel_encoder))
1071 dev_priv->lvds_edid_good = false;
1059 1072
1060 list_for_each_entry(scan, &connector->probed_modes, head) { 1073 list_for_each_entry(scan, &connector->probed_modes, head) {
1061 mutex_lock(&dev->mode_config.mutex); 1074 mutex_lock(&dev->mode_config.mutex);
@@ -1133,9 +1146,9 @@ out:
1133 1146
1134failed: 1147failed:
1135 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1148 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1136 if (intel_output->ddc_bus) 1149 if (intel_encoder->ddc_bus)
1137 intel_i2c_destroy(intel_output->ddc_bus); 1150 intel_i2c_destroy(intel_encoder->ddc_bus);
1138 drm_connector_cleanup(connector); 1151 drm_connector_cleanup(connector);
1139 drm_encoder_cleanup(encoder); 1152 drm_encoder_cleanup(encoder);
1140 kfree(intel_output); 1153 kfree(intel_encoder);
1141} 1154}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 89d303d1d3fb..8e5c83b2d120 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -34,7 +34,7 @@
34 * intel_ddc_probe 34 * intel_ddc_probe
35 * 35 *
36 */ 36 */
37bool intel_ddc_probe(struct intel_output *intel_output) 37bool intel_ddc_probe(struct intel_encoder *intel_encoder)
38{ 38{
39 u8 out_buf[] = { 0x0, 0x0}; 39 u8 out_buf[] = { 0x0, 0x0};
40 u8 buf[2]; 40 u8 buf[2];
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
54 } 54 }
55 }; 55 };
56 56
57 intel_i2c_quirk_set(intel_output->base.dev, true); 57 intel_i2c_quirk_set(intel_encoder->base.dev, true);
58 ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); 58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
59 intel_i2c_quirk_set(intel_output->base.dev, false); 59 intel_i2c_quirk_set(intel_encoder->base.dev, false);
60 if (ret == 2) 60 if (ret == 2)
61 return true; 61 return true;
62 62
@@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output)
69 * 69 *
70 * Fetch the EDID information from @connector using the DDC bus. 70 * Fetch the EDID information from @connector using the DDC bus.
71 */ 71 */
72int intel_ddc_get_modes(struct intel_output *intel_output) 72int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
73{ 73{
74 struct edid *edid; 74 struct edid *edid;
75 int ret = 0; 75 int ret = 0;
76 76
77 intel_i2c_quirk_set(intel_output->base.dev, true); 77 intel_i2c_quirk_set(intel_encoder->base.dev, true);
78 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); 78 edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
79 intel_i2c_quirk_set(intel_output->base.dev, false); 79 intel_i2c_quirk_set(intel_encoder->base.dev, false);
80 if (edid) { 80 if (edid) {
81 drm_mode_connector_update_edid_property(&intel_output->base, 81 drm_mode_connector_update_edid_property(&intel_encoder->base,
82 edid); 82 edid);
83 ret = drm_add_edid_modes(&intel_output->base, edid); 83 ret = drm_add_edid_modes(&intel_encoder->base, edid);
84 intel_output->base.display_info.raw_edid = NULL; 84 intel_encoder->base.display_info.raw_edid = NULL;
85 kfree(edid); 85 kfree(edid);
86 } 86 }
87 87
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 60595fc26fdd..6d524a1fc271 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
724 int ret, tmp_width; 724 int ret, tmp_width;
725 struct overlay_registers *regs; 725 struct overlay_registers *regs;
726 bool scale_changed = false; 726 bool scale_changed = false;
727 struct drm_i915_gem_object *bo_priv = new_bo->driver_private; 727 struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
728 struct drm_device *dev = overlay->dev; 728 struct drm_device *dev = overlay->dev;
729 729
730 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 730 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
809 intel_overlay_continue(overlay, scale_changed); 809 intel_overlay_continue(overlay, scale_changed);
810 810
811 overlay->old_vid_bo = overlay->vid_bo; 811 overlay->old_vid_bo = overlay->vid_bo;
812 overlay->vid_bo = new_bo->driver_private; 812 overlay->vid_bo = to_intel_bo(new_bo);
813 813
814 return 0; 814 return 0;
815 815
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev)
1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); 1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
1345 if (!reg_bo) 1345 if (!reg_bo)
1346 goto out_free; 1346 goto out_free;
1347 overlay->reg_bo = reg_bo->driver_private; 1347 overlay->reg_bo = to_intel_bo(reg_bo);
1348 1348
1349 if (OVERLAY_NONPHYSICAL(dev)) { 1349 if (OVERLAY_NONPHYSICAL(dev)) {
1350 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); 1350 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26e13a0bf30b..87d953664cb0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -54,7 +54,7 @@ struct intel_sdvo_priv {
54 u8 slave_addr; 54 u8 slave_addr;
55 55
56 /* Register for the SDVO device: SDVOB or SDVOC */ 56 /* Register for the SDVO device: SDVOB or SDVOC */
57 int output_device; 57 int sdvo_reg;
58 58
59 /* Active outputs controlled by this SDVO output */ 59 /* Active outputs controlled by this SDVO output */
60 uint16_t controlled_output; 60 uint16_t controlled_output;
@@ -124,7 +124,7 @@ struct intel_sdvo_priv {
124 */ 124 */
125 struct intel_sdvo_encode encode; 125 struct intel_sdvo_encode encode;
126 126
127 /* DDC bus used by this SDVO output */ 127 /* DDC bus used by this SDVO encoder */
128 uint8_t ddc_bus; 128 uint8_t ddc_bus;
129 129
130 /* Mac mini hack -- use the same DDC as the analog connector */ 130 /* Mac mini hack -- use the same DDC as the analog connector */
@@ -162,22 +162,22 @@ struct intel_sdvo_priv {
162}; 162};
163 163
164static bool 164static bool
165intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); 165intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
166 166
167/** 167/**
168 * Writes the SDVOB or SDVOC with the given value, but always writes both 168 * Writes the SDVOB or SDVOC with the given value, but always writes both
169 * SDVOB and SDVOC to work around apparent hardware issues (according to 169 * SDVOB and SDVOC to work around apparent hardware issues (according to
170 * comments in the BIOS). 170 * comments in the BIOS).
171 */ 171 */
172static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) 172static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
173{ 173{
174 struct drm_device *dev = intel_output->base.dev; 174 struct drm_device *dev = intel_encoder->base.dev;
175 struct drm_i915_private *dev_priv = dev->dev_private; 175 struct drm_i915_private *dev_priv = dev->dev_private;
176 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 176 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
177 u32 bval = val, cval = val; 177 u32 bval = val, cval = val;
178 int i; 178 int i;
179 179
180 if (sdvo_priv->output_device == SDVOB) { 180 if (sdvo_priv->sdvo_reg == SDVOB) {
181 cval = I915_READ(SDVOC); 181 cval = I915_READ(SDVOC);
182 } else { 182 } else {
183 bval = I915_READ(SDVOB); 183 bval = I915_READ(SDVOB);
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
196 } 196 }
197} 197}
198 198
199static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, 199static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
200 u8 *ch) 200 u8 *ch)
201{ 201{
202 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 202 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
203 u8 out_buf[2]; 203 u8 out_buf[2];
204 u8 buf[2]; 204 u8 buf[2];
205 int ret; 205 int ret;
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
222 out_buf[0] = addr; 222 out_buf[0] = addr;
223 out_buf[1] = 0; 223 out_buf[1] = 0;
224 224
225 if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) 225 if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
226 { 226 {
227 *ch = buf[0]; 227 *ch = buf[0];
228 return true; 228 return true;
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
232 return false; 232 return false;
233} 233}
234 234
235static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, 235static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
236 u8 ch) 236 u8 ch)
237{ 237{
238 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 238 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
239 u8 out_buf[2]; 239 u8 out_buf[2];
240 struct i2c_msg msgs[] = { 240 struct i2c_msg msgs[] = {
241 { 241 {
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
249 out_buf[0] = addr; 249 out_buf[0] = addr;
250 out_buf[1] = ch; 250 out_buf[1] = ch;
251 251
252 if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) 252 if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
253 { 253 {
254 return true; 254 return true;
255 } 255 }
@@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name {
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
354}; 354};
355 355
356#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 356#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
357#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) 357#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
358 358
359static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, 359static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
360 void *args, int args_len) 360 void *args, int args_len)
361{ 361{
362 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 362 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
363 int i; 363 int i;
364 364
365 DRM_DEBUG_KMS("%s: W: %02X ", 365 DRM_DEBUG_KMS("%s: W: %02X ",
@@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
379 DRM_LOG_KMS("\n"); 379 DRM_LOG_KMS("\n");
380} 380}
381 381
382static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, 382static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
383 void *args, int args_len) 383 void *args, int args_len)
384{ 384{
385 int i; 385 int i;
386 386
387 intel_sdvo_debug_write(intel_output, cmd, args, args_len); 387 intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
388 388
389 for (i = 0; i < args_len; i++) { 389 for (i = 0; i < args_len; i++) {
390 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, 390 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
391 ((u8*)args)[i]); 391 ((u8*)args)[i]);
392 } 392 }
393 393
394 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); 394 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
395} 395}
396 396
397static const char *cmd_status_names[] = { 397static const char *cmd_status_names[] = {
@@ -404,11 +404,11 @@ static const char *cmd_status_names[] = {
404 "Scaling not supported" 404 "Scaling not supported"
405}; 405};
406 406
407static void intel_sdvo_debug_response(struct intel_output *intel_output, 407static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
408 void *response, int response_len, 408 void *response, int response_len,
409 u8 status) 409 u8 status)
410{ 410{
411 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 411 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
412 int i; 412 int i;
413 413
414 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); 414 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
423 DRM_LOG_KMS("\n"); 423 DRM_LOG_KMS("\n");
424} 424}
425 425
426static u8 intel_sdvo_read_response(struct intel_output *intel_output, 426static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
427 void *response, int response_len) 427 void *response, int response_len)
428{ 428{
429 int i; 429 int i;
@@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
433 while (retry--) { 433 while (retry--) {
434 /* Read the command response */ 434 /* Read the command response */
435 for (i = 0; i < response_len; i++) { 435 for (i = 0; i < response_len; i++) {
436 intel_sdvo_read_byte(intel_output, 436 intel_sdvo_read_byte(intel_encoder,
437 SDVO_I2C_RETURN_0 + i, 437 SDVO_I2C_RETURN_0 + i,
438 &((u8 *)response)[i]); 438 &((u8 *)response)[i]);
439 } 439 }
440 440
441 /* read the return status */ 441 /* read the return status */
442 intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, 442 intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
443 &status); 443 &status);
444 444
445 intel_sdvo_debug_response(intel_output, response, response_len, 445 intel_sdvo_debug_response(intel_encoder, response, response_len,
446 status); 446 status);
447 if (status != SDVO_CMD_STATUS_PENDING) 447 if (status != SDVO_CMD_STATUS_PENDING)
448 return status; 448 return status;
@@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
470 * another I2C transaction after issuing the DDC bus switch, it will be 470 * another I2C transaction after issuing the DDC bus switch, it will be
471 * switched to the internal SDVO register. 471 * switched to the internal SDVO register.
472 */ 472 */
473static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 473static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
474 u8 target) 474 u8 target)
475{ 475{
476 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 476 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
477 u8 out_buf[2], cmd_buf[2], ret_value[2], ret; 477 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
478 struct i2c_msg msgs[] = { 478 struct i2c_msg msgs[] = {
479 { 479 {
@@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
497 }, 497 },
498 }; 498 };
499 499
500 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, 500 intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
501 &target, 1); 501 &target, 1);
502 /* write the DDC switch command argument */ 502 /* write the DDC switch command argument */
503 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); 503 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
504 504
505 out_buf[0] = SDVO_I2C_OPCODE; 505 out_buf[0] = SDVO_I2C_OPCODE;
506 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; 506 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
509 ret_value[0] = 0; 509 ret_value[0] = 0;
510 ret_value[1] = 0; 510 ret_value[1] = 0;
511 511
512 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); 512 ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
513 if (ret != 3) { 513 if (ret != 3) {
514 /* failure in I2C transfer */ 514 /* failure in I2C transfer */
515 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 515 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
523 return; 523 return;
524} 524}
525 525
526static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 526static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
527{ 527{
528 struct intel_sdvo_set_target_input_args targets = {0}; 528 struct intel_sdvo_set_target_input_args targets = {0};
529 u8 status; 529 u8 status;
@@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
534 if (target_1) 534 if (target_1)
535 targets.target_1 = 1; 535 targets.target_1 = 1;
536 536
537 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, 537 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
538 sizeof(targets)); 538 sizeof(targets));
539 539
540 status = intel_sdvo_read_response(intel_output, NULL, 0); 540 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
541 541
542 return (status == SDVO_CMD_STATUS_SUCCESS); 542 return (status == SDVO_CMD_STATUS_SUCCESS);
543} 543}
@@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
548 * This function is making an assumption about the layout of the response, 548 * This function is making an assumption about the layout of the response,
549 * which should be checked against the docs. 549 * which should be checked against the docs.
550 */ 550 */
551static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) 551static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
552{ 552{
553 struct intel_sdvo_get_trained_inputs_response response; 553 struct intel_sdvo_get_trained_inputs_response response;
554 u8 status; 554 u8 status;
555 555
556 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); 556 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
557 status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); 557 status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
558 if (status != SDVO_CMD_STATUS_SUCCESS) 558 if (status != SDVO_CMD_STATUS_SUCCESS)
559 return false; 559 return false;
560 560
@@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
563 return true; 563 return true;
564} 564}
565 565
566static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, 566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
567 u16 *outputs) 567 u16 *outputs)
568{ 568{
569 u8 status; 569 u8 status;
570 570
571 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); 571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
572 status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); 572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
573 573
574 return (status == SDVO_CMD_STATUS_SUCCESS); 574 return (status == SDVO_CMD_STATUS_SUCCESS);
575} 575}
576 576
577static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, 577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
578 u16 outputs) 578 u16 outputs)
579{ 579{
580 u8 status; 580 u8 status;
581 581
582 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, 582 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
583 sizeof(outputs)); 583 sizeof(outputs));
584 status = intel_sdvo_read_response(intel_output, NULL, 0); 584 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
585 return (status == SDVO_CMD_STATUS_SUCCESS); 585 return (status == SDVO_CMD_STATUS_SUCCESS);
586} 586}
587 587
588static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, 588static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
589 int mode) 589 int mode)
590{ 590{
591 u8 status, state = SDVO_ENCODER_STATE_ON; 591 u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
605 break; 605 break;
606 } 606 }
607 607
608 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 608 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
609 sizeof(state)); 609 sizeof(state));
610 status = intel_sdvo_read_response(intel_output, NULL, 0); 610 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
611 611
612 return (status == SDVO_CMD_STATUS_SUCCESS); 612 return (status == SDVO_CMD_STATUS_SUCCESS);
613} 613}
614 614
615static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, 615static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
616 int *clock_min, 616 int *clock_min,
617 int *clock_max) 617 int *clock_max)
618{ 618{
619 struct intel_sdvo_pixel_clock_range clocks; 619 struct intel_sdvo_pixel_clock_range clocks;
620 u8 status; 620 u8 status;
621 621
622 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, 622 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
623 NULL, 0); 623 NULL, 0);
624 624
625 status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); 625 status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
626 626
627 if (status != SDVO_CMD_STATUS_SUCCESS) 627 if (status != SDVO_CMD_STATUS_SUCCESS)
628 return false; 628 return false;
@@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
634 return true; 634 return true;
635} 635}
636 636
637static bool intel_sdvo_set_target_output(struct intel_output *intel_output, 637static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
638 u16 outputs) 638 u16 outputs)
639{ 639{
640 u8 status; 640 u8 status;
641 641
642 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, 642 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
643 sizeof(outputs)); 643 sizeof(outputs));
644 644
645 status = intel_sdvo_read_response(intel_output, NULL, 0); 645 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
646 return (status == SDVO_CMD_STATUS_SUCCESS); 646 return (status == SDVO_CMD_STATUS_SUCCESS);
647} 647}
648 648
649static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, 649static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
650 struct intel_sdvo_dtd *dtd) 650 struct intel_sdvo_dtd *dtd)
651{ 651{
652 u8 status; 652 u8 status;
653 653
654 intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); 654 intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
655 status = intel_sdvo_read_response(intel_output, &dtd->part1, 655 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
656 sizeof(dtd->part1)); 656 sizeof(dtd->part1));
657 if (status != SDVO_CMD_STATUS_SUCCESS) 657 if (status != SDVO_CMD_STATUS_SUCCESS)
658 return false; 658 return false;
659 659
660 intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); 660 intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
661 status = intel_sdvo_read_response(intel_output, &dtd->part2, 661 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
662 sizeof(dtd->part2)); 662 sizeof(dtd->part2));
663 if (status != SDVO_CMD_STATUS_SUCCESS) 663 if (status != SDVO_CMD_STATUS_SUCCESS)
664 return false; 664 return false;
@@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
666 return true; 666 return true;
667} 667}
668 668
669static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, 669static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
670 struct intel_sdvo_dtd *dtd) 670 struct intel_sdvo_dtd *dtd)
671{ 671{
672 return intel_sdvo_get_timing(intel_output, 672 return intel_sdvo_get_timing(intel_encoder,
673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); 673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
674} 674}
675 675
676static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, 676static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
677 struct intel_sdvo_dtd *dtd) 677 struct intel_sdvo_dtd *dtd)
678{ 678{
679 return intel_sdvo_get_timing(intel_output, 679 return intel_sdvo_get_timing(intel_encoder,
680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); 680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
681} 681}
682 682
683static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, 683static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
684 struct intel_sdvo_dtd *dtd) 684 struct intel_sdvo_dtd *dtd)
685{ 685{
686 u8 status; 686 u8 status;
687 687
688 intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); 688 intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
689 status = intel_sdvo_read_response(intel_output, NULL, 0); 689 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
690 if (status != SDVO_CMD_STATUS_SUCCESS) 690 if (status != SDVO_CMD_STATUS_SUCCESS)
691 return false; 691 return false;
692 692
693 intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); 693 intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
694 status = intel_sdvo_read_response(intel_output, NULL, 0); 694 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
695 if (status != SDVO_CMD_STATUS_SUCCESS) 695 if (status != SDVO_CMD_STATUS_SUCCESS)
696 return false; 696 return false;
697 697
698 return true; 698 return true;
699} 699}
700 700
701static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, 701static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
702 struct intel_sdvo_dtd *dtd) 702 struct intel_sdvo_dtd *dtd)
703{ 703{
704 return intel_sdvo_set_timing(intel_output, 704 return intel_sdvo_set_timing(intel_encoder,
705 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); 705 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
706} 706}
707 707
708static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, 708static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
709 struct intel_sdvo_dtd *dtd) 709 struct intel_sdvo_dtd *dtd)
710{ 710{
711 return intel_sdvo_set_timing(intel_output, 711 return intel_sdvo_set_timing(intel_encoder,
712 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 712 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
713} 713}
714 714
715static bool 715static bool
716intel_sdvo_create_preferred_input_timing(struct intel_output *output, 716intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
717 uint16_t clock, 717 uint16_t clock,
718 uint16_t width, 718 uint16_t width,
719 uint16_t height) 719 uint16_t height)
720{ 720{
721 struct intel_sdvo_preferred_input_timing_args args; 721 struct intel_sdvo_preferred_input_timing_args args;
722 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 722 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
723 uint8_t status; 723 uint8_t status;
724 724
725 memset(&args, 0, sizeof(args)); 725 memset(&args, 0, sizeof(args));
@@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
733 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) 733 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
734 args.scaled = 1; 734 args.scaled = 1;
735 735
736 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 736 intel_sdvo_write_cmd(intel_encoder,
737 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
737 &args, sizeof(args)); 738 &args, sizeof(args));
738 status = intel_sdvo_read_response(output, NULL, 0); 739 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
739 if (status != SDVO_CMD_STATUS_SUCCESS) 740 if (status != SDVO_CMD_STATUS_SUCCESS)
740 return false; 741 return false;
741 742
742 return true; 743 return true;
743} 744}
744 745
745static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, 746static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
746 struct intel_sdvo_dtd *dtd) 747 struct intel_sdvo_dtd *dtd)
747{ 748{
748 bool status; 749 bool status;
749 750
750 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, 751 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
751 NULL, 0); 752 NULL, 0);
752 753
753 status = intel_sdvo_read_response(output, &dtd->part1, 754 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
754 sizeof(dtd->part1)); 755 sizeof(dtd->part1));
755 if (status != SDVO_CMD_STATUS_SUCCESS) 756 if (status != SDVO_CMD_STATUS_SUCCESS)
756 return false; 757 return false;
757 758
758 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, 759 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
759 NULL, 0); 760 NULL, 0);
760 761
761 status = intel_sdvo_read_response(output, &dtd->part2, 762 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
762 sizeof(dtd->part2)); 763 sizeof(dtd->part2));
763 if (status != SDVO_CMD_STATUS_SUCCESS) 764 if (status != SDVO_CMD_STATUS_SUCCESS)
764 return false; 765 return false;
@@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
766 return false; 767 return false;
767} 768}
768 769
769static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) 770static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
770{ 771{
771 u8 response, status; 772 u8 response, status;
772 773
773 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); 774 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
774 status = intel_sdvo_read_response(intel_output, &response, 1); 775 status = intel_sdvo_read_response(intel_encoder, &response, 1);
775 776
776 if (status != SDVO_CMD_STATUS_SUCCESS) { 777 if (status != SDVO_CMD_STATUS_SUCCESS) {
777 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); 778 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
@@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
783 return response; 784 return response;
784} 785}
785 786
786static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) 787static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
787{ 788{
788 u8 status; 789 u8 status;
789 790
790 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); 791 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
791 status = intel_sdvo_read_response(intel_output, NULL, 0); 792 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
792 if (status != SDVO_CMD_STATUS_SUCCESS) 793 if (status != SDVO_CMD_STATUS_SUCCESS)
793 return false; 794 return false;
794 795
@@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
877 mode->flags |= DRM_MODE_FLAG_PVSYNC; 878 mode->flags |= DRM_MODE_FLAG_PVSYNC;
878} 879}
879 880
880static bool intel_sdvo_get_supp_encode(struct intel_output *output, 881static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
881 struct intel_sdvo_encode *encode) 882 struct intel_sdvo_encode *encode)
882{ 883{
883 uint8_t status; 884 uint8_t status;
884 885
885 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); 886 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
886 status = intel_sdvo_read_response(output, encode, sizeof(*encode)); 887 status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
887 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ 888 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
888 memset(encode, 0, sizeof(*encode)); 889 memset(encode, 0, sizeof(*encode));
889 return false; 890 return false;
@@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
892 return true; 893 return true;
893} 894}
894 895
895static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) 896static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
897 uint8_t mode)
896{ 898{
897 uint8_t status; 899 uint8_t status;
898 900
899 intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); 901 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
900 status = intel_sdvo_read_response(output, NULL, 0); 902 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
901 903
902 return (status == SDVO_CMD_STATUS_SUCCESS); 904 return (status == SDVO_CMD_STATUS_SUCCESS);
903} 905}
904 906
905static bool intel_sdvo_set_colorimetry(struct intel_output *output, 907static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
906 uint8_t mode) 908 uint8_t mode)
907{ 909{
908 uint8_t status; 910 uint8_t status;
909 911
910 intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); 912 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
911 status = intel_sdvo_read_response(output, NULL, 0); 913 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
912 914
913 return (status == SDVO_CMD_STATUS_SUCCESS); 915 return (status == SDVO_CMD_STATUS_SUCCESS);
914} 916}
915 917
916#if 0 918#if 0
917static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) 919static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
918{ 920{
919 int i, j; 921 int i, j;
920 uint8_t set_buf_index[2]; 922 uint8_t set_buf_index[2];
@@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
923 uint8_t buf[48]; 925 uint8_t buf[48];
924 uint8_t *pos; 926 uint8_t *pos;
925 927
926 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); 928 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
927 intel_sdvo_read_response(output, &av_split, 1); 929 intel_sdvo_read_response(encoder, &av_split, 1);
928 930
929 for (i = 0; i <= av_split; i++) { 931 for (i = 0; i <= av_split; i++) {
930 set_buf_index[0] = i; set_buf_index[1] = 0; 932 set_buf_index[0] = i; set_buf_index[1] = 0;
931 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, 933 intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
932 set_buf_index, 2); 934 set_buf_index, 2);
933 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); 935 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
934 intel_sdvo_read_response(output, &buf_size, 1); 936 intel_sdvo_read_response(encoder, &buf_size, 1);
935 937
936 pos = buf; 938 pos = buf;
937 for (j = 0; j <= buf_size; j += 8) { 939 for (j = 0; j <= buf_size; j += 8) {
938 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, 940 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
939 NULL, 0); 941 NULL, 0);
940 intel_sdvo_read_response(output, pos, 8); 942 intel_sdvo_read_response(encoder, pos, 8);
941 pos += 8; 943 pos += 8;
942 } 944 }
943 } 945 }
944} 946}
945#endif 947#endif
946 948
947static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, 949static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
948 uint8_t *data, int8_t size, uint8_t tx_rate) 950 int index,
951 uint8_t *data, int8_t size, uint8_t tx_rate)
949{ 952{
950 uint8_t set_buf_index[2]; 953 uint8_t set_buf_index[2];
951 954
952 set_buf_index[0] = index; 955 set_buf_index[0] = index;
953 set_buf_index[1] = 0; 956 set_buf_index[1] = 0;
954 957
955 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); 958 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
959 set_buf_index, 2);
956 960
957 for (; size > 0; size -= 8) { 961 for (; size > 0; size -= 8) {
958 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); 962 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
959 data += 8; 963 data += 8;
960 } 964 }
961 965
962 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); 966 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
963} 967}
964 968
965static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) 969static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1034,7 +1038,7 @@ struct dip_infoframe {
1034 } __attribute__ ((packed)) u; 1038 } __attribute__ ((packed)) u;
1035} __attribute__((packed)); 1039} __attribute__((packed));
1036 1040
1037static void intel_sdvo_set_avi_infoframe(struct intel_output *output, 1041static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
1038 struct drm_display_mode * mode) 1042 struct drm_display_mode * mode)
1039{ 1043{
1040 struct dip_infoframe avi_if = { 1044 struct dip_infoframe avi_if = {
@@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
1045 1049
1046 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 1050 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
1047 4 + avi_if.len); 1051 4 + avi_if.len);
1048 intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, 1052 intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
1053 4 + avi_if.len,
1049 SDVO_HBUF_TX_VSYNC); 1054 SDVO_HBUF_TX_VSYNC);
1050} 1055}
1051 1056
1052static void intel_sdvo_set_tv_format(struct intel_output *output) 1057static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
1053{ 1058{
1054 1059
1055 struct intel_sdvo_tv_format format; 1060 struct intel_sdvo_tv_format format;
1056 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1061 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1057 uint32_t format_map, i; 1062 uint32_t format_map, i;
1058 uint8_t status; 1063 uint8_t status;
1059 1064
@@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
1066 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 1071 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
1067 sizeof(format) : sizeof(format_map)); 1072 sizeof(format) : sizeof(format_map));
1068 1073
1069 intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, 1074 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
1070 sizeof(format)); 1075 sizeof(format));
1071 1076
1072 status = intel_sdvo_read_response(output, NULL, 0); 1077 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1073 if (status != SDVO_CMD_STATUS_SUCCESS) 1078 if (status != SDVO_CMD_STATUS_SUCCESS)
1074 DRM_DEBUG_KMS("%s: Failed to set TV format\n", 1079 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
1075 SDVO_NAME(sdvo_priv)); 1080 SDVO_NAME(sdvo_priv));
@@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1079 struct drm_display_mode *mode, 1084 struct drm_display_mode *mode,
1080 struct drm_display_mode *adjusted_mode) 1085 struct drm_display_mode *adjusted_mode)
1081{ 1086{
1082 struct intel_output *output = enc_to_intel_output(encoder); 1087 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1083 struct intel_sdvo_priv *dev_priv = output->dev_priv; 1088 struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
1084 1089
1085 if (dev_priv->is_tv) { 1090 if (dev_priv->is_tv) {
1086 struct intel_sdvo_dtd output_dtd; 1091 struct intel_sdvo_dtd output_dtd;
@@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1095 1100
1096 /* Set output timings */ 1101 /* Set output timings */
1097 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1102 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1098 intel_sdvo_set_target_output(output, 1103 intel_sdvo_set_target_output(intel_encoder,
1099 dev_priv->controlled_output); 1104 dev_priv->controlled_output);
1100 intel_sdvo_set_output_timing(output, &output_dtd); 1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1101 1106
1102 /* Set the input timing to the screen. Assume always input 0. */ 1107 /* Set the input timing to the screen. Assume always input 0. */
1103 intel_sdvo_set_target_input(output, true, false); 1108 intel_sdvo_set_target_input(intel_encoder, true, false);
1104 1109
1105 1110
1106 success = intel_sdvo_create_preferred_input_timing(output, 1111 success = intel_sdvo_create_preferred_input_timing(intel_encoder,
1107 mode->clock / 10, 1112 mode->clock / 10,
1108 mode->hdisplay, 1113 mode->hdisplay,
1109 mode->vdisplay); 1114 mode->vdisplay);
1110 if (success) { 1115 if (success) {
1111 struct intel_sdvo_dtd input_dtd; 1116 struct intel_sdvo_dtd input_dtd;
1112 1117
1113 intel_sdvo_get_preferred_input_timing(output, 1118 intel_sdvo_get_preferred_input_timing(intel_encoder,
1114 &input_dtd); 1119 &input_dtd);
1115 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1120 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1116 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1121 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1133 intel_sdvo_get_dtd_from_mode(&output_dtd, 1138 intel_sdvo_get_dtd_from_mode(&output_dtd,
1134 dev_priv->sdvo_lvds_fixed_mode); 1139 dev_priv->sdvo_lvds_fixed_mode);
1135 1140
1136 intel_sdvo_set_target_output(output, 1141 intel_sdvo_set_target_output(intel_encoder,
1137 dev_priv->controlled_output); 1142 dev_priv->controlled_output);
1138 intel_sdvo_set_output_timing(output, &output_dtd); 1143 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1139 1144
1140 /* Set the input timing to the screen. Assume always input 0. */ 1145 /* Set the input timing to the screen. Assume always input 0. */
1141 intel_sdvo_set_target_input(output, true, false); 1146 intel_sdvo_set_target_input(intel_encoder, true, false);
1142 1147
1143 1148
1144 success = intel_sdvo_create_preferred_input_timing( 1149 success = intel_sdvo_create_preferred_input_timing(
1145 output, 1150 intel_encoder,
1146 mode->clock / 10, 1151 mode->clock / 10,
1147 mode->hdisplay, 1152 mode->hdisplay,
1148 mode->vdisplay); 1153 mode->vdisplay);
@@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1150 if (success) { 1155 if (success) {
1151 struct intel_sdvo_dtd input_dtd; 1156 struct intel_sdvo_dtd input_dtd;
1152 1157
1153 intel_sdvo_get_preferred_input_timing(output, 1158 intel_sdvo_get_preferred_input_timing(intel_encoder,
1154 &input_dtd); 1159 &input_dtd);
1155 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1160 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1156 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1161 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1182 struct drm_i915_private *dev_priv = dev->dev_private; 1187 struct drm_i915_private *dev_priv = dev->dev_private;
1183 struct drm_crtc *crtc = encoder->crtc; 1188 struct drm_crtc *crtc = encoder->crtc;
1184 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1189 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1185 struct intel_output *output = enc_to_intel_output(encoder); 1190 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1186 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1191 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1187 u32 sdvox = 0; 1192 u32 sdvox = 0;
1188 int sdvo_pixel_multiply; 1193 int sdvo_pixel_multiply;
1189 struct intel_sdvo_in_out_map in_out; 1194 struct intel_sdvo_in_out_map in_out;
@@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1202 in_out.in0 = sdvo_priv->controlled_output; 1207 in_out.in0 = sdvo_priv->controlled_output;
1203 in_out.in1 = 0; 1208 in_out.in1 = 0;
1204 1209
1205 intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, 1210 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
1206 &in_out, sizeof(in_out)); 1211 &in_out, sizeof(in_out));
1207 status = intel_sdvo_read_response(output, NULL, 0); 1212 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1208 1213
1209 if (sdvo_priv->is_hdmi) { 1214 if (sdvo_priv->is_hdmi) {
1210 intel_sdvo_set_avi_infoframe(output, mode); 1215 intel_sdvo_set_avi_infoframe(intel_encoder, mode);
1211 sdvox |= SDVO_AUDIO_ENABLE; 1216 sdvox |= SDVO_AUDIO_ENABLE;
1212 } 1217 }
1213 1218
@@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1224 */ 1229 */
1225 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1230 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1226 /* Set the output timing to the screen */ 1231 /* Set the output timing to the screen */
1227 intel_sdvo_set_target_output(output, 1232 intel_sdvo_set_target_output(intel_encoder,
1228 sdvo_priv->controlled_output); 1233 sdvo_priv->controlled_output);
1229 intel_sdvo_set_output_timing(output, &input_dtd); 1234 intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
1230 } 1235 }
1231 1236
1232 /* Set the input timing to the screen. Assume always input 0. */ 1237 /* Set the input timing to the screen. Assume always input 0. */
1233 intel_sdvo_set_target_input(output, true, false); 1238 intel_sdvo_set_target_input(intel_encoder, true, false);
1234 1239
1235 if (sdvo_priv->is_tv) 1240 if (sdvo_priv->is_tv)
1236 intel_sdvo_set_tv_format(output); 1241 intel_sdvo_set_tv_format(intel_encoder);
1237 1242
1238 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1243 /* We would like to use intel_sdvo_create_preferred_input_timing() to
1239 * provide the device with a timing it can support, if it supports that 1244 * provide the device with a timing it can support, if it supports that
@@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1241 * output the preferred timing, and we don't support that currently. 1246 * output the preferred timing, and we don't support that currently.
1242 */ 1247 */
1243#if 0 1248#if 0
1244 success = intel_sdvo_create_preferred_input_timing(output, clock, 1249 success = intel_sdvo_create_preferred_input_timing(encoder, clock,
1245 width, height); 1250 width, height);
1246 if (success) { 1251 if (success) {
1247 struct intel_sdvo_dtd *input_dtd; 1252 struct intel_sdvo_dtd *input_dtd;
1248 1253
1249 intel_sdvo_get_preferred_input_timing(output, &input_dtd); 1254 intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
1250 intel_sdvo_set_input_timing(output, &input_dtd); 1255 intel_sdvo_set_input_timing(encoder, &input_dtd);
1251 } 1256 }
1252#else 1257#else
1253 intel_sdvo_set_input_timing(output, &input_dtd); 1258 intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
1254#endif 1259#endif
1255 1260
1256 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1261 switch (intel_sdvo_get_pixel_multiplier(mode)) {
1257 case 1: 1262 case 1:
1258 intel_sdvo_set_clock_rate_mult(output, 1263 intel_sdvo_set_clock_rate_mult(intel_encoder,
1259 SDVO_CLOCK_RATE_MULT_1X); 1264 SDVO_CLOCK_RATE_MULT_1X);
1260 break; 1265 break;
1261 case 2: 1266 case 2:
1262 intel_sdvo_set_clock_rate_mult(output, 1267 intel_sdvo_set_clock_rate_mult(intel_encoder,
1263 SDVO_CLOCK_RATE_MULT_2X); 1268 SDVO_CLOCK_RATE_MULT_2X);
1264 break; 1269 break;
1265 case 4: 1270 case 4:
1266 intel_sdvo_set_clock_rate_mult(output, 1271 intel_sdvo_set_clock_rate_mult(intel_encoder,
1267 SDVO_CLOCK_RATE_MULT_4X); 1272 SDVO_CLOCK_RATE_MULT_4X);
1268 break; 1273 break;
1269 } 1274 }
@@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1274 SDVO_VSYNC_ACTIVE_HIGH | 1279 SDVO_VSYNC_ACTIVE_HIGH |
1275 SDVO_HSYNC_ACTIVE_HIGH; 1280 SDVO_HSYNC_ACTIVE_HIGH;
1276 } else { 1281 } else {
1277 sdvox |= I915_READ(sdvo_priv->output_device); 1282 sdvox |= I915_READ(sdvo_priv->sdvo_reg);
1278 switch (sdvo_priv->output_device) { 1283 switch (sdvo_priv->sdvo_reg) {
1279 case SDVOB: 1284 case SDVOB:
1280 sdvox &= SDVOB_PRESERVE_MASK; 1285 sdvox &= SDVOB_PRESERVE_MASK;
1281 break; 1286 break;
@@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1299 1304
1300 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) 1305 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
1301 sdvox |= SDVO_STALL_SELECT; 1306 sdvox |= SDVO_STALL_SELECT;
1302 intel_sdvo_write_sdvox(output, sdvox); 1307 intel_sdvo_write_sdvox(intel_encoder, sdvox);
1303} 1308}
1304 1309
1305static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1310static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1306{ 1311{
1307 struct drm_device *dev = encoder->dev; 1312 struct drm_device *dev = encoder->dev;
1308 struct drm_i915_private *dev_priv = dev->dev_private; 1313 struct drm_i915_private *dev_priv = dev->dev_private;
1309 struct intel_output *intel_output = enc_to_intel_output(encoder); 1314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1310 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1315 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1311 u32 temp; 1316 u32 temp;
1312 1317
1313 if (mode != DRM_MODE_DPMS_ON) { 1318 if (mode != DRM_MODE_DPMS_ON) {
1314 intel_sdvo_set_active_outputs(intel_output, 0); 1319 intel_sdvo_set_active_outputs(intel_encoder, 0);
1315 if (0) 1320 if (0)
1316 intel_sdvo_set_encoder_power_state(intel_output, mode); 1321 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1317 1322
1318 if (mode == DRM_MODE_DPMS_OFF) { 1323 if (mode == DRM_MODE_DPMS_OFF) {
1319 temp = I915_READ(sdvo_priv->output_device); 1324 temp = I915_READ(sdvo_priv->sdvo_reg);
1320 if ((temp & SDVO_ENABLE) != 0) { 1325 if ((temp & SDVO_ENABLE) != 0) {
1321 intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); 1326 intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
1322 } 1327 }
1323 } 1328 }
1324 } else { 1329 } else {
@@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1326 int i; 1331 int i;
1327 u8 status; 1332 u8 status;
1328 1333
1329 temp = I915_READ(sdvo_priv->output_device); 1334 temp = I915_READ(sdvo_priv->sdvo_reg);
1330 if ((temp & SDVO_ENABLE) == 0) 1335 if ((temp & SDVO_ENABLE) == 0)
1331 intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); 1336 intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
1332 for (i = 0; i < 2; i++) 1337 for (i = 0; i < 2; i++)
1333 intel_wait_for_vblank(dev); 1338 intel_wait_for_vblank(dev);
1334 1339
1335 status = intel_sdvo_get_trained_inputs(intel_output, &input1, 1340 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
1336 &input2); 1341 &input2);
1337 1342
1338 1343
@@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1346 } 1351 }
1347 1352
1348 if (0) 1353 if (0)
1349 intel_sdvo_set_encoder_power_state(intel_output, mode); 1354 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1350 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); 1355 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
1351 } 1356 }
1352 return; 1357 return;
1353} 1358}
@@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector)
1356{ 1361{
1357 struct drm_device *dev = connector->dev; 1362 struct drm_device *dev = connector->dev;
1358 struct drm_i915_private *dev_priv = dev->dev_private; 1363 struct drm_i915_private *dev_priv = dev->dev_private;
1359 struct intel_output *intel_output = to_intel_output(connector); 1364 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1360 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1365 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1361 int o; 1366 int o;
1362 1367
1363 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); 1368 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
1364 intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); 1369 intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
1365 1370
1366 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1371 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1367 intel_sdvo_set_target_input(intel_output, true, false); 1372 intel_sdvo_set_target_input(intel_encoder, true, false);
1368 intel_sdvo_get_input_timing(intel_output, 1373 intel_sdvo_get_input_timing(intel_encoder,
1369 &sdvo_priv->save_input_dtd_1); 1374 &sdvo_priv->save_input_dtd_1);
1370 } 1375 }
1371 1376
1372 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1377 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1373 intel_sdvo_set_target_input(intel_output, false, true); 1378 intel_sdvo_set_target_input(intel_encoder, false, true);
1374 intel_sdvo_get_input_timing(intel_output, 1379 intel_sdvo_get_input_timing(intel_encoder,
1375 &sdvo_priv->save_input_dtd_2); 1380 &sdvo_priv->save_input_dtd_2);
1376 } 1381 }
1377 1382
@@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector)
1380 u16 this_output = (1 << o); 1385 u16 this_output = (1 << o);
1381 if (sdvo_priv->caps.output_flags & this_output) 1386 if (sdvo_priv->caps.output_flags & this_output)
1382 { 1387 {
1383 intel_sdvo_set_target_output(intel_output, this_output); 1388 intel_sdvo_set_target_output(intel_encoder, this_output);
1384 intel_sdvo_get_output_timing(intel_output, 1389 intel_sdvo_get_output_timing(intel_encoder,
1385 &sdvo_priv->save_output_dtd[o]); 1390 &sdvo_priv->save_output_dtd[o]);
1386 } 1391 }
1387 } 1392 }
@@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector)
1389 /* XXX: Save TV format/enhancements. */ 1394 /* XXX: Save TV format/enhancements. */
1390 } 1395 }
1391 1396
1392 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); 1397 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
1393} 1398}
1394 1399
1395static void intel_sdvo_restore(struct drm_connector *connector) 1400static void intel_sdvo_restore(struct drm_connector *connector)
1396{ 1401{
1397 struct drm_device *dev = connector->dev; 1402 struct drm_device *dev = connector->dev;
1398 struct intel_output *intel_output = to_intel_output(connector); 1403 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1399 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1404 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1400 int o; 1405 int o;
1401 int i; 1406 int i;
1402 bool input1, input2; 1407 bool input1, input2;
1403 u8 status; 1408 u8 status;
1404 1409
1405 intel_sdvo_set_active_outputs(intel_output, 0); 1410 intel_sdvo_set_active_outputs(intel_encoder, 0);
1406 1411
1407 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) 1412 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1408 { 1413 {
1409 u16 this_output = (1 << o); 1414 u16 this_output = (1 << o);
1410 if (sdvo_priv->caps.output_flags & this_output) { 1415 if (sdvo_priv->caps.output_flags & this_output) {
1411 intel_sdvo_set_target_output(intel_output, this_output); 1416 intel_sdvo_set_target_output(intel_encoder, this_output);
1412 intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); 1417 intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
1413 } 1418 }
1414 } 1419 }
1415 1420
1416 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1417 intel_sdvo_set_target_input(intel_output, true, false); 1422 intel_sdvo_set_target_input(intel_encoder, true, false);
1418 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); 1423 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
1419 } 1424 }
1420 1425
1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1426 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1422 intel_sdvo_set_target_input(intel_output, false, true); 1427 intel_sdvo_set_target_input(intel_encoder, false, true);
1423 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); 1428 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
1424 } 1429 }
1425 1430
1426 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); 1431 intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
1427 1432
1428 if (sdvo_priv->is_tv) { 1433 if (sdvo_priv->is_tv) {
1429 /* XXX: Restore TV format/enhancements. */ 1434 /* XXX: Restore TV format/enhancements. */
1430 } 1435 }
1431 1436
1432 intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); 1437 intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
1433 1438
1434 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) 1439 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
1435 { 1440 {
1436 for (i = 0; i < 2; i++) 1441 for (i = 0; i < 2; i++)
1437 intel_wait_for_vblank(dev); 1442 intel_wait_for_vblank(dev);
1438 status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); 1443 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
1439 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) 1444 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
1440 DRM_DEBUG_KMS("First %s output reported failure to " 1445 DRM_DEBUG_KMS("First %s output reported failure to "
1441 "sync\n", SDVO_NAME(sdvo_priv)); 1446 "sync\n", SDVO_NAME(sdvo_priv));
1442 } 1447 }
1443 1448
1444 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); 1449 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
1445} 1450}
1446 1451
1447static int intel_sdvo_mode_valid(struct drm_connector *connector, 1452static int intel_sdvo_mode_valid(struct drm_connector *connector,
1448 struct drm_display_mode *mode) 1453 struct drm_display_mode *mode)
1449{ 1454{
1450 struct intel_output *intel_output = to_intel_output(connector); 1455 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1451 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1456 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1452 1457
1453 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1458 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1454 return MODE_NO_DBLESCAN; 1459 return MODE_NO_DBLESCAN;
@@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1473 return MODE_OK; 1478 return MODE_OK;
1474} 1479}
1475 1480
1476static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) 1481static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
1477{ 1482{
1478 u8 status; 1483 u8 status;
1479 1484
1480 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); 1485 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
1481 status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); 1486 status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
1482 if (status != SDVO_CMD_STATUS_SUCCESS) 1487 if (status != SDVO_CMD_STATUS_SUCCESS)
1483 return false; 1488 return false;
1484 1489
@@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc
1488struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1493struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1489{ 1494{
1490 struct drm_connector *connector = NULL; 1495 struct drm_connector *connector = NULL;
1491 struct intel_output *iout = NULL; 1496 struct intel_encoder *iout = NULL;
1492 struct intel_sdvo_priv *sdvo; 1497 struct intel_sdvo_priv *sdvo;
1493 1498
1494 /* find the sdvo connector */ 1499 /* find the sdvo connector */
1495 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1500 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1496 iout = to_intel_output(connector); 1501 iout = to_intel_encoder(connector);
1497 1502
1498 if (iout->type != INTEL_OUTPUT_SDVO) 1503 if (iout->type != INTEL_OUTPUT_SDVO)
1499 continue; 1504 continue;
1500 1505
1501 sdvo = iout->dev_priv; 1506 sdvo = iout->dev_priv;
1502 1507
1503 if (sdvo->output_device == SDVOB && sdvoB) 1508 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1504 return connector; 1509 return connector;
1505 1510
1506 if (sdvo->output_device == SDVOC && !sdvoB) 1511 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1507 return connector; 1512 return connector;
1508 1513
1509 } 1514 }
@@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1515{ 1520{
1516 u8 response[2]; 1521 u8 response[2];
1517 u8 status; 1522 u8 status;
1518 struct intel_output *intel_output; 1523 struct intel_encoder *intel_encoder;
1519 DRM_DEBUG_KMS("\n"); 1524 DRM_DEBUG_KMS("\n");
1520 1525
1521 if (!connector) 1526 if (!connector)
1522 return 0; 1527 return 0;
1523 1528
1524 intel_output = to_intel_output(connector); 1529 intel_encoder = to_intel_encoder(connector);
1525 1530
1526 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1531 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1527 status = intel_sdvo_read_response(intel_output, &response, 2); 1532 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1528 1533
1529 if (response[0] !=0) 1534 if (response[0] !=0)
1530 return 1; 1535 return 1;
@@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1536{ 1541{
1537 u8 response[2]; 1542 u8 response[2];
1538 u8 status; 1543 u8 status;
1539 struct intel_output *intel_output = to_intel_output(connector); 1544 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1540 1545
1541 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1546 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1542 intel_sdvo_read_response(intel_output, &response, 2); 1547 intel_sdvo_read_response(intel_encoder, &response, 2);
1543 1548
1544 if (on) { 1549 if (on) {
1545 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1550 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1546 status = intel_sdvo_read_response(intel_output, &response, 2); 1551 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1547 1552
1548 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1553 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1549 } else { 1554 } else {
1550 response[0] = 0; 1555 response[0] = 0;
1551 response[1] = 0; 1556 response[1] = 0;
1552 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1557 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1553 } 1558 }
1554 1559
1555 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1560 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1556 intel_sdvo_read_response(intel_output, &response, 2); 1561 intel_sdvo_read_response(intel_encoder, &response, 2);
1557} 1562}
1558 1563
1559static bool 1564static bool
1560intel_sdvo_multifunc_encoder(struct intel_output *intel_output) 1565intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
1561{ 1566{
1562 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1567 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1563 int caps = 0; 1568 int caps = 0;
1564 1569
1565 if (sdvo_priv->caps.output_flags & 1570 if (sdvo_priv->caps.output_flags &
@@ -1593,11 +1598,11 @@ static struct drm_connector *
1593intel_find_analog_connector(struct drm_device *dev) 1598intel_find_analog_connector(struct drm_device *dev)
1594{ 1599{
1595 struct drm_connector *connector; 1600 struct drm_connector *connector;
1596 struct intel_output *intel_output; 1601 struct intel_encoder *intel_encoder;
1597 1602
1598 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1603 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1599 intel_output = to_intel_output(connector); 1604 intel_encoder = to_intel_encoder(connector);
1600 if (intel_output->type == INTEL_OUTPUT_ANALOG) 1605 if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
1601 return connector; 1606 return connector;
1602 } 1607 }
1603 return NULL; 1608 return NULL;
@@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev)
1622enum drm_connector_status 1627enum drm_connector_status
1623intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) 1628intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1624{ 1629{
1625 struct intel_output *intel_output = to_intel_output(connector); 1630 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1626 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1631 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1627 enum drm_connector_status status = connector_status_connected; 1632 enum drm_connector_status status = connector_status_connected;
1628 struct edid *edid = NULL; 1633 struct edid *edid = NULL;
1629 1634
1630 edid = drm_get_edid(&intel_output->base, 1635 edid = drm_get_edid(&intel_encoder->base,
1631 intel_output->ddc_bus); 1636 intel_encoder->ddc_bus);
1632 1637
1633 /* This is only applied to SDVO cards with multiple outputs */ 1638 /* This is only applied to SDVO cards with multiple outputs */
1634 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { 1639 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
1635 uint8_t saved_ddc, temp_ddc; 1640 uint8_t saved_ddc, temp_ddc;
1636 saved_ddc = sdvo_priv->ddc_bus; 1641 saved_ddc = sdvo_priv->ddc_bus;
1637 temp_ddc = sdvo_priv->ddc_bus >> 1; 1642 temp_ddc = sdvo_priv->ddc_bus >> 1;
@@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1641 */ 1646 */
1642 while(temp_ddc > 1) { 1647 while(temp_ddc > 1) {
1643 sdvo_priv->ddc_bus = temp_ddc; 1648 sdvo_priv->ddc_bus = temp_ddc;
1644 edid = drm_get_edid(&intel_output->base, 1649 edid = drm_get_edid(&intel_encoder->base,
1645 intel_output->ddc_bus); 1650 intel_encoder->ddc_bus);
1646 if (edid) { 1651 if (edid) {
1647 /* 1652 /*
1648 * When we can get the EDID, maybe it is the 1653 * When we can get the EDID, maybe it is the
@@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1661 */ 1666 */
1662 if (edid == NULL && 1667 if (edid == NULL &&
1663 sdvo_priv->analog_ddc_bus && 1668 sdvo_priv->analog_ddc_bus &&
1664 !intel_analog_is_connected(intel_output->base.dev)) 1669 !intel_analog_is_connected(intel_encoder->base.dev))
1665 edid = drm_get_edid(&intel_output->base, 1670 edid = drm_get_edid(&intel_encoder->base,
1666 sdvo_priv->analog_ddc_bus); 1671 sdvo_priv->analog_ddc_bus);
1667 if (edid != NULL) { 1672 if (edid != NULL) {
1668 /* Don't report the output as connected if it's a DVI-I 1673 /* Don't report the output as connected if it's a DVI-I
@@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1677 } 1682 }
1678 1683
1679 kfree(edid); 1684 kfree(edid);
1680 intel_output->base.display_info.raw_edid = NULL; 1685 intel_encoder->base.display_info.raw_edid = NULL;
1681 1686
1682 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) 1687 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1683 status = connector_status_disconnected; 1688 status = connector_status_disconnected;
@@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1689{ 1694{
1690 uint16_t response; 1695 uint16_t response;
1691 u8 status; 1696 u8 status;
1692 struct intel_output *intel_output = to_intel_output(connector); 1697 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1693 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1698 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1694 1699
1695 intel_sdvo_write_cmd(intel_output, 1700 intel_sdvo_write_cmd(intel_encoder,
1696 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1701 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
1697 if (sdvo_priv->is_tv) { 1702 if (sdvo_priv->is_tv) {
1698 /* add 30ms delay when the output type is SDVO-TV */ 1703 /* add 30ms delay when the output type is SDVO-TV */
1699 mdelay(30); 1704 mdelay(30);
1700 } 1705 }
1701 status = intel_sdvo_read_response(intel_output, &response, 2); 1706 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1702 1707
1703 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1708 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
1704 1709
@@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1708 if (response == 0) 1713 if (response == 0)
1709 return connector_status_disconnected; 1714 return connector_status_disconnected;
1710 1715
1711 if (intel_sdvo_multifunc_encoder(intel_output) && 1716 if (intel_sdvo_multifunc_encoder(intel_encoder) &&
1712 sdvo_priv->attached_output != response) { 1717 sdvo_priv->attached_output != response) {
1713 if (sdvo_priv->controlled_output != response && 1718 if (sdvo_priv->controlled_output != response &&
1714 intel_sdvo_output_setup(intel_output, response) != true) 1719 intel_sdvo_output_setup(intel_encoder, response) != true)
1715 return connector_status_unknown; 1720 return connector_status_unknown;
1716 sdvo_priv->attached_output = response; 1721 sdvo_priv->attached_output = response;
1717 } 1722 }
@@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1720 1725
1721static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1726static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1722{ 1727{
1723 struct intel_output *intel_output = to_intel_output(connector); 1728 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1724 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1729 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1725 int num_modes; 1730 int num_modes;
1726 1731
1727 /* set the bus switch and get the modes */ 1732 /* set the bus switch and get the modes */
1728 num_modes = intel_ddc_get_modes(intel_output); 1733 num_modes = intel_ddc_get_modes(intel_encoder);
1729 1734
1730 /* 1735 /*
1731 * Mac mini hack. On this device, the DVI-I connector shares one DDC 1736 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1735 */ 1740 */
1736 if (num_modes == 0 && 1741 if (num_modes == 0 &&
1737 sdvo_priv->analog_ddc_bus && 1742 sdvo_priv->analog_ddc_bus &&
1738 !intel_analog_is_connected(intel_output->base.dev)) { 1743 !intel_analog_is_connected(intel_encoder->base.dev)) {
1739 struct i2c_adapter *digital_ddc_bus; 1744 struct i2c_adapter *digital_ddc_bus;
1740 1745
1741 /* Switch to the analog ddc bus and try that 1746 /* Switch to the analog ddc bus and try that
1742 */ 1747 */
1743 digital_ddc_bus = intel_output->ddc_bus; 1748 digital_ddc_bus = intel_encoder->ddc_bus;
1744 intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; 1749 intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
1745 1750
1746 (void) intel_ddc_get_modes(intel_output); 1751 (void) intel_ddc_get_modes(intel_encoder);
1747 1752
1748 intel_output->ddc_bus = digital_ddc_bus; 1753 intel_encoder->ddc_bus = digital_ddc_bus;
1749 } 1754 }
1750} 1755}
1751 1756
@@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
1816 1821
1817static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1822static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1818{ 1823{
1819 struct intel_output *output = to_intel_output(connector); 1824 struct intel_encoder *output = to_intel_encoder(connector);
1820 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1825 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1821 struct intel_sdvo_sdtv_resolution_request tv_res; 1826 struct intel_sdvo_sdtv_resolution_request tv_res;
1822 uint32_t reply = 0, format_map = 0; 1827 uint32_t reply = 0, format_map = 0;
@@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1858 1863
1859static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1864static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1860{ 1865{
1861 struct intel_output *intel_output = to_intel_output(connector); 1866 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1862 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1867 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1863 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1868 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1864 struct drm_display_mode *newmode; 1869 struct drm_display_mode *newmode;
1865 1870
1866 /* 1871 /*
@@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1868 * Assume that the preferred modes are 1873 * Assume that the preferred modes are
1869 * arranged in priority order. 1874 * arranged in priority order.
1870 */ 1875 */
1871 intel_ddc_get_modes(intel_output); 1876 intel_ddc_get_modes(intel_encoder);
1872 if (list_empty(&connector->probed_modes) == false) 1877 if (list_empty(&connector->probed_modes) == false)
1873 goto end; 1878 goto end;
1874 1879
@@ -1897,7 +1902,7 @@ end:
1897 1902
1898static int intel_sdvo_get_modes(struct drm_connector *connector) 1903static int intel_sdvo_get_modes(struct drm_connector *connector)
1899{ 1904{
1900 struct intel_output *output = to_intel_output(connector); 1905 struct intel_encoder *output = to_intel_encoder(connector);
1901 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1906 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1902 1907
1903 if (sdvo_priv->is_tv) 1908 if (sdvo_priv->is_tv)
@@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1915static 1920static
1916void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) 1921void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1917{ 1922{
1918 struct intel_output *intel_output = to_intel_output(connector); 1923 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1919 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1924 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1920 struct drm_device *dev = connector->dev; 1925 struct drm_device *dev = connector->dev;
1921 1926
1922 if (sdvo_priv->is_tv) { 1927 if (sdvo_priv->is_tv) {
@@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1953 1958
1954static void intel_sdvo_destroy(struct drm_connector *connector) 1959static void intel_sdvo_destroy(struct drm_connector *connector)
1955{ 1960{
1956 struct intel_output *intel_output = to_intel_output(connector); 1961 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1957 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1962 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1958 1963
1959 if (intel_output->i2c_bus) 1964 if (intel_encoder->i2c_bus)
1960 intel_i2c_destroy(intel_output->i2c_bus); 1965 intel_i2c_destroy(intel_encoder->i2c_bus);
1961 if (intel_output->ddc_bus) 1966 if (intel_encoder->ddc_bus)
1962 intel_i2c_destroy(intel_output->ddc_bus); 1967 intel_i2c_destroy(intel_encoder->ddc_bus);
1963 if (sdvo_priv->analog_ddc_bus) 1968 if (sdvo_priv->analog_ddc_bus)
1964 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 1969 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
1965 1970
@@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1977 drm_sysfs_connector_remove(connector); 1982 drm_sysfs_connector_remove(connector);
1978 drm_connector_cleanup(connector); 1983 drm_connector_cleanup(connector);
1979 1984
1980 kfree(intel_output); 1985 kfree(intel_encoder);
1981} 1986}
1982 1987
1983static int 1988static int
@@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
1985 struct drm_property *property, 1990 struct drm_property *property,
1986 uint64_t val) 1991 uint64_t val)
1987{ 1992{
1988 struct intel_output *intel_output = to_intel_output(connector); 1993 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1989 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1994 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1990 struct drm_encoder *encoder = &intel_output->enc; 1995 struct drm_encoder *encoder = &intel_encoder->enc;
1991 struct drm_crtc *crtc = encoder->crtc; 1996 struct drm_crtc *crtc = encoder->crtc;
1992 int ret = 0; 1997 int ret = 0;
1993 bool changed = false; 1998 bool changed = false;
@@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
2095 sdvo_priv->cur_brightness = temp_value; 2100 sdvo_priv->cur_brightness = temp_value;
2096 } 2101 }
2097 if (cmd) { 2102 if (cmd) {
2098 intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); 2103 intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
2099 status = intel_sdvo_read_response(intel_output, 2104 status = intel_sdvo_read_response(intel_encoder,
2100 NULL, 0); 2105 NULL, 0);
2101 if (status != SDVO_CMD_STATUS_SUCCESS) { 2106 if (status != SDVO_CMD_STATUS_SUCCESS) {
2102 DRM_DEBUG_KMS("Incorrect SDVO command \n"); 2107 DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
2191} 2196}
2192 2197
2193static bool 2198static bool
2194intel_sdvo_get_digital_encoding_mode(struct intel_output *output) 2199intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
2195{ 2200{
2196 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 2201 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
2197 uint8_t status; 2202 uint8_t status;
@@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
2205 return true; 2210 return true;
2206} 2211}
2207 2212
2208static struct intel_output * 2213static struct intel_encoder *
2209intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) 2214intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
2210{ 2215{
2211 struct drm_device *dev = chan->drm_dev; 2216 struct drm_device *dev = chan->drm_dev;
2212 struct drm_connector *connector; 2217 struct drm_connector *connector;
2213 struct intel_output *intel_output = NULL; 2218 struct intel_encoder *intel_encoder = NULL;
2214 2219
2215 list_for_each_entry(connector, 2220 list_for_each_entry(connector,
2216 &dev->mode_config.connector_list, head) { 2221 &dev->mode_config.connector_list, head) {
2217 if (to_intel_output(connector)->ddc_bus == &chan->adapter) { 2222 if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
2218 intel_output = to_intel_output(connector); 2223 intel_encoder = to_intel_encoder(connector);
2219 break; 2224 break;
2220 } 2225 }
2221 } 2226 }
2222 return intel_output; 2227 return intel_encoder;
2223} 2228}
2224 2229
2225static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, 2230static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
2226 struct i2c_msg msgs[], int num) 2231 struct i2c_msg msgs[], int num)
2227{ 2232{
2228 struct intel_output *intel_output; 2233 struct intel_encoder *intel_encoder;
2229 struct intel_sdvo_priv *sdvo_priv; 2234 struct intel_sdvo_priv *sdvo_priv;
2230 struct i2c_algo_bit_data *algo_data; 2235 struct i2c_algo_bit_data *algo_data;
2231 const struct i2c_algorithm *algo; 2236 const struct i2c_algorithm *algo;
2232 2237
2233 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 2238 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
2234 intel_output = 2239 intel_encoder =
2235 intel_sdvo_chan_to_intel_output( 2240 intel_sdvo_chan_to_intel_encoder(
2236 (struct intel_i2c_chan *)(algo_data->data)); 2241 (struct intel_i2c_chan *)(algo_data->data));
2237 if (intel_output == NULL) 2242 if (intel_encoder == NULL)
2238 return -EINVAL; 2243 return -EINVAL;
2239 2244
2240 sdvo_priv = intel_output->dev_priv; 2245 sdvo_priv = intel_encoder->dev_priv;
2241 algo = intel_output->i2c_bus->algo; 2246 algo = intel_encoder->i2c_bus->algo;
2242 2247
2243 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); 2248 intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
2244 return algo->master_xfer(i2c_adap, msgs, num); 2249 return algo->master_xfer(i2c_adap, msgs, num);
2245} 2250}
2246 2251
@@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
2249}; 2254};
2250 2255
2251static u8 2256static u8
2252intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) 2257intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
2253{ 2258{
2254 struct drm_i915_private *dev_priv = dev->dev_private; 2259 struct drm_i915_private *dev_priv = dev->dev_private;
2255 struct sdvo_device_mapping *my_mapping, *other_mapping; 2260 struct sdvo_device_mapping *my_mapping, *other_mapping;
2256 2261
2257 if (output_device == SDVOB) { 2262 if (sdvo_reg == SDVOB) {
2258 my_mapping = &dev_priv->sdvo_mappings[0]; 2263 my_mapping = &dev_priv->sdvo_mappings[0];
2259 other_mapping = &dev_priv->sdvo_mappings[1]; 2264 other_mapping = &dev_priv->sdvo_mappings[1];
2260 } else { 2265 } else {
@@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2279 /* No SDVO device info is found for another DVO port, 2284 /* No SDVO device info is found for another DVO port,
2280 * so use mapping assumption we had before BIOS parsing. 2285 * so use mapping assumption we had before BIOS parsing.
2281 */ 2286 */
2282 if (output_device == SDVOB) 2287 if (sdvo_reg == SDVOB)
2283 return 0x70; 2288 return 0x70;
2284 else 2289 else
2285 return 0x72; 2290 return 0x72;
@@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = {
2305}; 2310};
2306 2311
2307static bool 2312static bool
2308intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2313intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
2309{ 2314{
2310 struct drm_connector *connector = &intel_output->base; 2315 struct drm_connector *connector = &intel_encoder->base;
2311 struct drm_encoder *encoder = &intel_output->enc; 2316 struct drm_encoder *encoder = &intel_encoder->enc;
2312 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2317 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2313 bool ret = true, registered = false; 2318 bool ret = true, registered = false;
2314 2319
2315 sdvo_priv->is_tv = false; 2320 sdvo_priv->is_tv = false;
2316 intel_output->needs_tv_clock = false; 2321 intel_encoder->needs_tv_clock = false;
2317 sdvo_priv->is_lvds = false; 2322 sdvo_priv->is_lvds = false;
2318 2323
2319 if (device_is_registered(&connector->kdev)) { 2324 if (device_is_registered(&connector->kdev)) {
@@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2331 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2336 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2332 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2337 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2333 2338
2334 if (intel_sdvo_get_supp_encode(intel_output, 2339 if (intel_sdvo_get_supp_encode(intel_encoder,
2335 &sdvo_priv->encode) && 2340 &sdvo_priv->encode) &&
2336 intel_sdvo_get_digital_encoding_mode(intel_output) && 2341 intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
2337 sdvo_priv->is_hdmi) { 2342 sdvo_priv->is_hdmi) {
2338 /* enable hdmi encoding mode if supported */ 2343 /* enable hdmi encoding mode if supported */
2339 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); 2344 intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
2340 intel_sdvo_set_colorimetry(intel_output, 2345 intel_sdvo_set_colorimetry(intel_encoder,
2341 SDVO_COLORIMETRY_RGB256); 2346 SDVO_COLORIMETRY_RGB256);
2342 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2347 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2343 intel_output->clone_mask = 2348 intel_encoder->clone_mask =
2344 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2349 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2345 (1 << INTEL_ANALOG_CLONE_BIT); 2350 (1 << INTEL_ANALOG_CLONE_BIT);
2346 } 2351 }
@@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2356 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2357 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true; 2358 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true; 2359 intel_encoder->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2360 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2356 } else if (flags & SDVO_OUTPUT_RGB0) { 2361 } else if (flags & SDVO_OUTPUT_RGB0) {
2357 2362
2358 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 2363 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
2359 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2364 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2360 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2365 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2361 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2366 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2362 (1 << INTEL_ANALOG_CLONE_BIT); 2367 (1 << INTEL_ANALOG_CLONE_BIT);
2363 } else if (flags & SDVO_OUTPUT_RGB1) { 2368 } else if (flags & SDVO_OUTPUT_RGB1) {
2364 2369
2365 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2370 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2366 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2371 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2367 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2372 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2368 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2373 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2369 (1 << INTEL_ANALOG_CLONE_BIT); 2374 (1 << INTEL_ANALOG_CLONE_BIT);
2370 } else if (flags & SDVO_OUTPUT_CVBS0) { 2375 } else if (flags & SDVO_OUTPUT_CVBS0) {
2371 2376
@@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2373 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2378 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2374 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2379 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2375 sdvo_priv->is_tv = true; 2380 sdvo_priv->is_tv = true;
2376 intel_output->needs_tv_clock = true; 2381 intel_encoder->needs_tv_clock = true;
2377 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2382 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2378 } else if (flags & SDVO_OUTPUT_LVDS0) { 2383 } else if (flags & SDVO_OUTPUT_LVDS0) {
2379 2384
2380 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2385 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
2381 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2386 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2382 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2387 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2383 sdvo_priv->is_lvds = true; 2388 sdvo_priv->is_lvds = true;
2384 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2389 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2385 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2390 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2386 } else if (flags & SDVO_OUTPUT_LVDS1) { 2391 } else if (flags & SDVO_OUTPUT_LVDS1) {
2387 2392
@@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2389 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2394 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2390 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2395 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2391 sdvo_priv->is_lvds = true; 2396 sdvo_priv->is_lvds = true;
2392 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2397 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2393 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2398 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2394 } else { 2399 } else {
2395 2400
@@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2402 bytes[0], bytes[1]); 2407 bytes[0], bytes[1]);
2403 ret = false; 2408 ret = false;
2404 } 2409 }
2405 intel_output->crtc_mask = (1 << 0) | (1 << 1); 2410 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
2406 2411
2407 if (ret && registered) 2412 if (ret && registered)
2408 ret = drm_sysfs_connector_add(connector) == 0 ? true : false; 2413 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
@@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2414 2419
2415static void intel_sdvo_tv_create_property(struct drm_connector *connector) 2420static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2416{ 2421{
2417 struct intel_output *intel_output = to_intel_output(connector); 2422 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2418 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2423 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2419 struct intel_sdvo_tv_format format; 2424 struct intel_sdvo_tv_format format;
2420 uint32_t format_map, i; 2425 uint32_t format_map, i;
2421 uint8_t status; 2426 uint8_t status;
2422 2427
2423 intel_sdvo_set_target_output(intel_output, 2428 intel_sdvo_set_target_output(intel_encoder,
2424 sdvo_priv->controlled_output); 2429 sdvo_priv->controlled_output);
2425 2430
2426 intel_sdvo_write_cmd(intel_output, 2431 intel_sdvo_write_cmd(intel_encoder,
2427 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); 2432 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
2428 status = intel_sdvo_read_response(intel_output, 2433 status = intel_sdvo_read_response(intel_encoder,
2429 &format, sizeof(format)); 2434 &format, sizeof(format));
2430 if (status != SDVO_CMD_STATUS_SUCCESS) 2435 if (status != SDVO_CMD_STATUS_SUCCESS)
2431 return; 2436 return;
@@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2463 2468
2464static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2469static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2465{ 2470{
2466 struct intel_output *intel_output = to_intel_output(connector); 2471 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2467 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2472 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2468 struct intel_sdvo_enhancements_reply sdvo_data; 2473 struct intel_sdvo_enhancements_reply sdvo_data;
2469 struct drm_device *dev = connector->dev; 2474 struct drm_device *dev = connector->dev;
2470 uint8_t status; 2475 uint8_t status;
2471 uint16_t response, data_value[2]; 2476 uint16_t response, data_value[2];
2472 2477
2473 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2478 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2474 NULL, 0); 2479 NULL, 0);
2475 status = intel_sdvo_read_response(intel_output, &sdvo_data, 2480 status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
2476 sizeof(sdvo_data)); 2481 sizeof(sdvo_data));
2477 if (status != SDVO_CMD_STATUS_SUCCESS) { 2482 if (status != SDVO_CMD_STATUS_SUCCESS) {
2478 DRM_DEBUG_KMS(" incorrect response is returned\n"); 2483 DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2488 * property 2493 * property
2489 */ 2494 */
2490 if (sdvo_data.overscan_h) { 2495 if (sdvo_data.overscan_h) {
2491 intel_sdvo_write_cmd(intel_output, 2496 intel_sdvo_write_cmd(intel_encoder,
2492 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2497 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2493 status = intel_sdvo_read_response(intel_output, 2498 status = intel_sdvo_read_response(intel_encoder,
2494 &data_value, 4); 2499 &data_value, 4);
2495 if (status != SDVO_CMD_STATUS_SUCCESS) { 2500 if (status != SDVO_CMD_STATUS_SUCCESS) {
2496 DRM_DEBUG_KMS("Incorrect SDVO max " 2501 DRM_DEBUG_KMS("Incorrect SDVO max "
2497 "h_overscan\n"); 2502 "h_overscan\n");
2498 return; 2503 return;
2499 } 2504 }
2500 intel_sdvo_write_cmd(intel_output, 2505 intel_sdvo_write_cmd(intel_encoder,
2501 SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2506 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2502 status = intel_sdvo_read_response(intel_output, 2507 status = intel_sdvo_read_response(intel_encoder,
2503 &response, 2); 2508 &response, 2);
2504 if (status != SDVO_CMD_STATUS_SUCCESS) { 2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2505 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2510 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2529 data_value[0], data_value[1], response); 2534 data_value[0], data_value[1], response);
2530 } 2535 }
2531 if (sdvo_data.overscan_v) { 2536 if (sdvo_data.overscan_v) {
2532 intel_sdvo_write_cmd(intel_output, 2537 intel_sdvo_write_cmd(intel_encoder,
2533 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); 2538 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2534 status = intel_sdvo_read_response(intel_output, 2539 status = intel_sdvo_read_response(intel_encoder,
2535 &data_value, 4); 2540 &data_value, 4);
2536 if (status != SDVO_CMD_STATUS_SUCCESS) { 2541 if (status != SDVO_CMD_STATUS_SUCCESS) {
2537 DRM_DEBUG_KMS("Incorrect SDVO max " 2542 DRM_DEBUG_KMS("Incorrect SDVO max "
2538 "v_overscan\n"); 2543 "v_overscan\n");
2539 return; 2544 return;
2540 } 2545 }
2541 intel_sdvo_write_cmd(intel_output, 2546 intel_sdvo_write_cmd(intel_encoder,
2542 SDVO_CMD_GET_OVERSCAN_V, NULL, 0); 2547 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2543 status = intel_sdvo_read_response(intel_output, 2548 status = intel_sdvo_read_response(intel_encoder,
2544 &response, 2); 2549 &response, 2);
2545 if (status != SDVO_CMD_STATUS_SUCCESS) { 2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2546 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); 2551 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2570 data_value[0], data_value[1], response); 2575 data_value[0], data_value[1], response);
2571 } 2576 }
2572 if (sdvo_data.position_h) { 2577 if (sdvo_data.position_h) {
2573 intel_sdvo_write_cmd(intel_output, 2578 intel_sdvo_write_cmd(intel_encoder,
2574 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); 2579 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2575 status = intel_sdvo_read_response(intel_output, 2580 status = intel_sdvo_read_response(intel_encoder,
2576 &data_value, 4); 2581 &data_value, 4);
2577 if (status != SDVO_CMD_STATUS_SUCCESS) { 2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2578 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); 2583 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2579 return; 2584 return;
2580 } 2585 }
2581 intel_sdvo_write_cmd(intel_output, 2586 intel_sdvo_write_cmd(intel_encoder,
2582 SDVO_CMD_GET_POSITION_H, NULL, 0); 2587 SDVO_CMD_GET_POSITION_H, NULL, 0);
2583 status = intel_sdvo_read_response(intel_output, 2588 status = intel_sdvo_read_response(intel_encoder,
2584 &response, 2); 2589 &response, 2);
2585 if (status != SDVO_CMD_STATUS_SUCCESS) { 2590 if (status != SDVO_CMD_STATUS_SUCCESS) {
2586 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); 2591 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2601 data_value[0], data_value[1], response); 2606 data_value[0], data_value[1], response);
2602 } 2607 }
2603 if (sdvo_data.position_v) { 2608 if (sdvo_data.position_v) {
2604 intel_sdvo_write_cmd(intel_output, 2609 intel_sdvo_write_cmd(intel_encoder,
2605 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); 2610 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2606 status = intel_sdvo_read_response(intel_output, 2611 status = intel_sdvo_read_response(intel_encoder,
2607 &data_value, 4); 2612 &data_value, 4);
2608 if (status != SDVO_CMD_STATUS_SUCCESS) { 2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2609 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); 2614 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2610 return; 2615 return;
2611 } 2616 }
2612 intel_sdvo_write_cmd(intel_output, 2617 intel_sdvo_write_cmd(intel_encoder,
2613 SDVO_CMD_GET_POSITION_V, NULL, 0); 2618 SDVO_CMD_GET_POSITION_V, NULL, 0);
2614 status = intel_sdvo_read_response(intel_output, 2619 status = intel_sdvo_read_response(intel_encoder,
2615 &response, 2); 2620 &response, 2);
2616 if (status != SDVO_CMD_STATUS_SUCCESS) { 2621 if (status != SDVO_CMD_STATUS_SUCCESS) {
2617 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); 2622 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2634 } 2639 }
2635 if (sdvo_priv->is_tv) { 2640 if (sdvo_priv->is_tv) {
2636 if (sdvo_data.saturation) { 2641 if (sdvo_data.saturation) {
2637 intel_sdvo_write_cmd(intel_output, 2642 intel_sdvo_write_cmd(intel_encoder,
2638 SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2643 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2639 status = intel_sdvo_read_response(intel_output, 2644 status = intel_sdvo_read_response(intel_encoder,
2640 &data_value, 4); 2645 &data_value, 4);
2641 if (status != SDVO_CMD_STATUS_SUCCESS) { 2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
2642 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); 2647 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2643 return; 2648 return;
2644 } 2649 }
2645 intel_sdvo_write_cmd(intel_output, 2650 intel_sdvo_write_cmd(intel_encoder,
2646 SDVO_CMD_GET_SATURATION, NULL, 0); 2651 SDVO_CMD_GET_SATURATION, NULL, 0);
2647 status = intel_sdvo_read_response(intel_output, 2652 status = intel_sdvo_read_response(intel_encoder,
2648 &response, 2); 2653 &response, 2);
2649 if (status != SDVO_CMD_STATUS_SUCCESS) { 2654 if (status != SDVO_CMD_STATUS_SUCCESS) {
2650 DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); 2655 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2666 data_value[0], data_value[1], response); 2671 data_value[0], data_value[1], response);
2667 } 2672 }
2668 if (sdvo_data.contrast) { 2673 if (sdvo_data.contrast) {
2669 intel_sdvo_write_cmd(intel_output, 2674 intel_sdvo_write_cmd(intel_encoder,
2670 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); 2675 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2671 status = intel_sdvo_read_response(intel_output, 2676 status = intel_sdvo_read_response(intel_encoder,
2672 &data_value, 4); 2677 &data_value, 4);
2673 if (status != SDVO_CMD_STATUS_SUCCESS) { 2678 if (status != SDVO_CMD_STATUS_SUCCESS) {
2674 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); 2679 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2675 return; 2680 return;
2676 } 2681 }
2677 intel_sdvo_write_cmd(intel_output, 2682 intel_sdvo_write_cmd(intel_encoder,
2678 SDVO_CMD_GET_CONTRAST, NULL, 0); 2683 SDVO_CMD_GET_CONTRAST, NULL, 0);
2679 status = intel_sdvo_read_response(intel_output, 2684 status = intel_sdvo_read_response(intel_encoder,
2680 &response, 2); 2685 &response, 2);
2681 if (status != SDVO_CMD_STATUS_SUCCESS) { 2686 if (status != SDVO_CMD_STATUS_SUCCESS) {
2682 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); 2687 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2697 data_value[0], data_value[1], response); 2702 data_value[0], data_value[1], response);
2698 } 2703 }
2699 if (sdvo_data.hue) { 2704 if (sdvo_data.hue) {
2700 intel_sdvo_write_cmd(intel_output, 2705 intel_sdvo_write_cmd(intel_encoder,
2701 SDVO_CMD_GET_MAX_HUE, NULL, 0); 2706 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2702 status = intel_sdvo_read_response(intel_output, 2707 status = intel_sdvo_read_response(intel_encoder,
2703 &data_value, 4); 2708 &data_value, 4);
2704 if (status != SDVO_CMD_STATUS_SUCCESS) { 2709 if (status != SDVO_CMD_STATUS_SUCCESS) {
2705 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); 2710 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2706 return; 2711 return;
2707 } 2712 }
2708 intel_sdvo_write_cmd(intel_output, 2713 intel_sdvo_write_cmd(intel_encoder,
2709 SDVO_CMD_GET_HUE, NULL, 0); 2714 SDVO_CMD_GET_HUE, NULL, 0);
2710 status = intel_sdvo_read_response(intel_output, 2715 status = intel_sdvo_read_response(intel_encoder,
2711 &response, 2); 2716 &response, 2);
2712 if (status != SDVO_CMD_STATUS_SUCCESS) { 2717 if (status != SDVO_CMD_STATUS_SUCCESS) {
2713 DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); 2718 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2730 } 2735 }
2731 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 2736 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2732 if (sdvo_data.brightness) { 2737 if (sdvo_data.brightness) {
2733 intel_sdvo_write_cmd(intel_output, 2738 intel_sdvo_write_cmd(intel_encoder,
2734 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2739 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2735 status = intel_sdvo_read_response(intel_output, 2740 status = intel_sdvo_read_response(intel_encoder,
2736 &data_value, 4); 2741 &data_value, 4);
2737 if (status != SDVO_CMD_STATUS_SUCCESS) { 2742 if (status != SDVO_CMD_STATUS_SUCCESS) {
2738 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2743 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2739 return; 2744 return;
2740 } 2745 }
2741 intel_sdvo_write_cmd(intel_output, 2746 intel_sdvo_write_cmd(intel_encoder,
2742 SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2747 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2743 status = intel_sdvo_read_response(intel_output, 2748 status = intel_sdvo_read_response(intel_encoder,
2744 &response, 2); 2749 &response, 2);
2745 if (status != SDVO_CMD_STATUS_SUCCESS) { 2750 if (status != SDVO_CMD_STATUS_SUCCESS) {
2746 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2751 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
@@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2765 return; 2770 return;
2766} 2771}
2767 2772
2768bool intel_sdvo_init(struct drm_device *dev, int output_device) 2773bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2769{ 2774{
2770 struct drm_i915_private *dev_priv = dev->dev_private; 2775 struct drm_i915_private *dev_priv = dev->dev_private;
2771 struct drm_connector *connector; 2776 struct drm_connector *connector;
2772 struct intel_output *intel_output; 2777 struct intel_encoder *intel_encoder;
2773 struct intel_sdvo_priv *sdvo_priv; 2778 struct intel_sdvo_priv *sdvo_priv;
2774 2779
2775 u8 ch[0x40]; 2780 u8 ch[0x40];
2776 int i; 2781 int i;
2777 2782
2778 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2783 intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
2779 if (!intel_output) { 2784 if (!intel_encoder) {
2780 return false; 2785 return false;
2781 } 2786 }
2782 2787
2783 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 2788 sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
2784 sdvo_priv->output_device = output_device; 2789 sdvo_priv->sdvo_reg = sdvo_reg;
2785 2790
2786 intel_output->dev_priv = sdvo_priv; 2791 intel_encoder->dev_priv = sdvo_priv;
2787 intel_output->type = INTEL_OUTPUT_SDVO; 2792 intel_encoder->type = INTEL_OUTPUT_SDVO;
2788 2793
2789 /* setup the DDC bus. */ 2794 /* setup the DDC bus. */
2790 if (output_device == SDVOB) 2795 if (sdvo_reg == SDVOB)
2791 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 2796 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
2792 else 2797 else
2793 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 2798 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
2794 2799
2795 if (!intel_output->i2c_bus) 2800 if (!intel_encoder->i2c_bus)
2796 goto err_inteloutput; 2801 goto err_inteloutput;
2797 2802
2798 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 2803 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
2799 2804
2800 /* Save the bit-banging i2c functionality for use by the DDC wrapper */ 2805 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
2801 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; 2806 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
2802 2807
2803 /* Read the regs to test if we can talk to the device */ 2808 /* Read the regs to test if we can talk to the device */
2804 for (i = 0; i < 0x40; i++) { 2809 for (i = 0; i < 0x40; i++) {
2805 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 2810 if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
2806 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2811 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2807 output_device == SDVOB ? 'B' : 'C'); 2812 sdvo_reg == SDVOB ? 'B' : 'C');
2808 goto err_i2c; 2813 goto err_i2c;
2809 } 2814 }
2810 } 2815 }
2811 2816
2812 /* setup the DDC bus. */ 2817 /* setup the DDC bus. */
2813 if (output_device == SDVOB) { 2818 if (sdvo_reg == SDVOB) {
2814 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2819 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2815 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2816 "SDVOB/VGA DDC BUS"); 2821 "SDVOB/VGA DDC BUS");
2817 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2822 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2818 } else { 2823 } else {
2819 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2824 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2825 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2821 "SDVOC/VGA DDC BUS"); 2826 "SDVOC/VGA DDC BUS");
2822 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2827 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2823 } 2828 }
2824 2829
2825 if (intel_output->ddc_bus == NULL) 2830 if (intel_encoder->ddc_bus == NULL)
2826 goto err_i2c; 2831 goto err_i2c;
2827 2832
2828 /* Wrap with our custom algo which switches to DDC mode */ 2833 /* Wrap with our custom algo which switches to DDC mode */
2829 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; 2834 intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
2830 2835
2831 /* In default case sdvo lvds is false */ 2836 /* In default case sdvo lvds is false */
2832 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 2837 intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
2833 2838
2834 if (intel_sdvo_output_setup(intel_output, 2839 if (intel_sdvo_output_setup(intel_encoder,
2835 sdvo_priv->caps.output_flags) != true) { 2840 sdvo_priv->caps.output_flags) != true) {
2836 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2841 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2837 output_device == SDVOB ? 'B' : 'C'); 2842 sdvo_reg == SDVOB ? 'B' : 'C');
2838 goto err_i2c; 2843 goto err_i2c;
2839 } 2844 }
2840 2845
2841 2846
2842 connector = &intel_output->base; 2847 connector = &intel_encoder->base;
2843 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, 2848 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
2844 connector->connector_type); 2849 connector->connector_type);
2845 2850
@@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2848 connector->doublescan_allowed = 0; 2853 connector->doublescan_allowed = 0;
2849 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 2854 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
2850 2855
2851 drm_encoder_init(dev, &intel_output->enc, 2856 drm_encoder_init(dev, &intel_encoder->enc,
2852 &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); 2857 &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
2853 2858
2854 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 2859 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
2855 2860
2856 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2861 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
2857 if (sdvo_priv->is_tv) 2862 if (sdvo_priv->is_tv)
2858 intel_sdvo_tv_create_property(connector); 2863 intel_sdvo_tv_create_property(connector);
2859 2864
@@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2865 intel_sdvo_select_ddc_bus(sdvo_priv); 2870 intel_sdvo_select_ddc_bus(sdvo_priv);
2866 2871
2867 /* Set the input timing to the screen. Assume always input 0. */ 2872 /* Set the input timing to the screen. Assume always input 0. */
2868 intel_sdvo_set_target_input(intel_output, true, false); 2873 intel_sdvo_set_target_input(intel_encoder, true, false);
2869 2874
2870 intel_sdvo_get_input_pixel_clock_range(intel_output, 2875 intel_sdvo_get_input_pixel_clock_range(intel_encoder,
2871 &sdvo_priv->pixel_clock_min, 2876 &sdvo_priv->pixel_clock_min,
2872 &sdvo_priv->pixel_clock_max); 2877 &sdvo_priv->pixel_clock_max);
2873 2878
@@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2894err_i2c: 2899err_i2c:
2895 if (sdvo_priv->analog_ddc_bus != NULL) 2900 if (sdvo_priv->analog_ddc_bus != NULL)
2896 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 2901 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
2897 if (intel_output->ddc_bus != NULL) 2902 if (intel_encoder->ddc_bus != NULL)
2898 intel_i2c_destroy(intel_output->ddc_bus); 2903 intel_i2c_destroy(intel_encoder->ddc_bus);
2899 if (intel_output->i2c_bus != NULL) 2904 if (intel_encoder->i2c_bus != NULL)
2900 intel_i2c_destroy(intel_output->i2c_bus); 2905 intel_i2c_destroy(intel_encoder->i2c_bus);
2901err_inteloutput: 2906err_inteloutput:
2902 kfree(intel_output); 2907 kfree(intel_encoder);
2903 2908
2904 return false; 2909 return false;
2905} 2910}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..d7d39b2327df 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector)
921{ 921{
922 struct drm_device *dev = connector->dev; 922 struct drm_device *dev = connector->dev;
923 struct drm_i915_private *dev_priv = dev->dev_private; 923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_output *intel_output = to_intel_output(connector); 924 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
925 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 925 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
926 int i; 926 int i;
927 927
928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); 928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector)
971{ 971{
972 struct drm_device *dev = connector->dev; 972 struct drm_device *dev = connector->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private; 973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_output *intel_output = to_intel_output(connector); 974 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
975 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 975 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
976 struct drm_crtc *crtc = connector->encoder->crtc; 976 struct drm_crtc *crtc = connector->encoder->crtc;
977 struct intel_crtc *intel_crtc; 977 struct intel_crtc *intel_crtc;
978 int i; 978 int i;
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format)
1068} 1068}
1069 1069
1070static const struct tv_mode * 1070static const struct tv_mode *
1071intel_tv_mode_find (struct intel_output *intel_output) 1071intel_tv_mode_find (struct intel_encoder *intel_encoder)
1072{ 1072{
1073 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1073 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1074 1074
1075 return intel_tv_mode_lookup(tv_priv->tv_format); 1075 return intel_tv_mode_lookup(tv_priv->tv_format);
1076} 1076}
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output)
1078static enum drm_mode_status 1078static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) 1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{ 1080{
1081 struct intel_output *intel_output = to_intel_output(connector); 1081 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
1095{ 1095{
1096 struct drm_device *dev = encoder->dev; 1096 struct drm_device *dev = encoder->dev;
1097 struct drm_mode_config *drm_config = &dev->mode_config; 1097 struct drm_mode_config *drm_config = &dev->mode_config;
1098 struct intel_output *intel_output = enc_to_intel_output(encoder); 1098 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); 1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
1100 struct drm_encoder *other_encoder; 1100 struct drm_encoder *other_encoder;
1101 1101
1102 if (!tv_mode) 1102 if (!tv_mode)
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1121 struct drm_i915_private *dev_priv = dev->dev_private; 1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122 struct drm_crtc *crtc = encoder->crtc; 1122 struct drm_crtc *crtc = encoder->crtc;
1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1124 struct intel_output *intel_output = enc_to_intel_output(encoder); 1124 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1125 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1125 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1127 u32 tv_ctl; 1127 u32 tv_ctl;
1128 u32 hctl1, hctl2, hctl3; 1128 u32 hctl1, hctl2, hctl3;
1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; 1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = {
1360 * \return false if TV is disconnected. 1360 * \return false if TV is disconnected.
1361 */ 1361 */
1362static int 1362static int
1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) 1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
1364{ 1364{
1365 struct drm_encoder *encoder = &intel_output->enc; 1365 struct drm_encoder *encoder = &intel_encoder->enc;
1366 struct drm_device *dev = encoder->dev; 1366 struct drm_device *dev = encoder->dev;
1367 struct drm_i915_private *dev_priv = dev->dev_private; 1367 struct drm_i915_private *dev_priv = dev->dev_private;
1368 unsigned long irqflags; 1368 unsigned long irqflags;
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1441 */ 1441 */
1442static void intel_tv_find_better_format(struct drm_connector *connector) 1442static void intel_tv_find_better_format(struct drm_connector *connector)
1443{ 1443{
1444 struct intel_output *intel_output = to_intel_output(connector); 1444 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1445 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1445 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1447 int i; 1447 int i;
1448 1448
1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == 1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector)
1475{ 1475{
1476 struct drm_crtc *crtc; 1476 struct drm_crtc *crtc;
1477 struct drm_display_mode mode; 1477 struct drm_display_mode mode;
1478 struct intel_output *intel_output = to_intel_output(connector); 1478 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1479 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1479 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1480 struct drm_encoder *encoder = &intel_output->enc; 1480 struct drm_encoder *encoder = &intel_encoder->enc;
1481 int dpms_mode; 1481 int dpms_mode;
1482 int type = tv_priv->type; 1482 int type = tv_priv->type;
1483 1483
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector)
1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1486 1486
1487 if (encoder->crtc && encoder->crtc->enabled) { 1487 if (encoder->crtc && encoder->crtc->enabled) {
1488 type = intel_tv_detect_type(encoder->crtc, intel_output); 1488 type = intel_tv_detect_type(encoder->crtc, intel_encoder);
1489 } else { 1489 } else {
1490 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1490 crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
1491 if (crtc) { 1491 if (crtc) {
1492 type = intel_tv_detect_type(crtc, intel_output); 1492 type = intel_tv_detect_type(crtc, intel_encoder);
1493 intel_release_load_detect_pipe(intel_output, dpms_mode); 1493 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
1494 } else 1494 } else
1495 type = -1; 1495 type = -1;
1496 } 1496 }
@@ -1525,8 +1525,8 @@ static void
1525intel_tv_chose_preferred_modes(struct drm_connector *connector, 1525intel_tv_chose_preferred_modes(struct drm_connector *connector,
1526 struct drm_display_mode *mode_ptr) 1526 struct drm_display_mode *mode_ptr)
1527{ 1527{
1528 struct intel_output *intel_output = to_intel_output(connector); 1528 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1530 1530
1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) 1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; 1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,8 +1550,8 @@ static int
1550intel_tv_get_modes(struct drm_connector *connector) 1550intel_tv_get_modes(struct drm_connector *connector)
1551{ 1551{
1552 struct drm_display_mode *mode_ptr; 1552 struct drm_display_mode *mode_ptr;
1553 struct intel_output *intel_output = to_intel_output(connector); 1553 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1555 int j, count = 0; 1555 int j, count = 0;
1556 u64 tmp; 1556 u64 tmp;
1557 1557
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector)
1604static void 1604static void
1605intel_tv_destroy (struct drm_connector *connector) 1605intel_tv_destroy (struct drm_connector *connector)
1606{ 1606{
1607 struct intel_output *intel_output = to_intel_output(connector); 1607 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1608 1608
1609 drm_sysfs_connector_remove(connector); 1609 drm_sysfs_connector_remove(connector);
1610 drm_connector_cleanup(connector); 1610 drm_connector_cleanup(connector);
1611 kfree(intel_output); 1611 kfree(intel_encoder);
1612} 1612}
1613 1613
1614 1614
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1617 uint64_t val) 1617 uint64_t val)
1618{ 1618{
1619 struct drm_device *dev = connector->dev; 1619 struct drm_device *dev = connector->dev;
1620 struct intel_output *intel_output = to_intel_output(connector); 1620 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1621 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1621 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1622 struct drm_encoder *encoder = &intel_output->enc; 1622 struct drm_encoder *encoder = &intel_encoder->enc;
1623 struct drm_crtc *crtc = encoder->crtc; 1623 struct drm_crtc *crtc = encoder->crtc;
1624 int ret = 0; 1624 int ret = 0;
1625 bool changed = false; 1625 bool changed = false;
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev)
1740{ 1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct drm_connector *connector; 1742 struct drm_connector *connector;
1743 struct intel_output *intel_output; 1743 struct intel_encoder *intel_encoder;
1744 struct intel_tv_priv *tv_priv; 1744 struct intel_tv_priv *tv_priv;
1745 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1745 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1746 char **tv_format_names; 1746 char **tv_format_names;
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev)
1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1781 return; 1781 return;
1782 1782
1783 intel_output = kzalloc(sizeof(struct intel_output) + 1783 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
1784 sizeof(struct intel_tv_priv), GFP_KERNEL); 1784 sizeof(struct intel_tv_priv), GFP_KERNEL);
1785 if (!intel_output) { 1785 if (!intel_encoder) {
1786 return; 1786 return;
1787 } 1787 }
1788 1788
1789 connector = &intel_output->base; 1789 connector = &intel_encoder->base;
1790 1790
1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1792 DRM_MODE_CONNECTOR_SVIDEO); 1792 DRM_MODE_CONNECTOR_SVIDEO);
1793 1793
1794 drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, 1794 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
1795 DRM_MODE_ENCODER_TVDAC); 1795 DRM_MODE_ENCODER_TVDAC);
1796 1796
1797 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1797 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1798 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1798 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
1799 intel_output->type = INTEL_OUTPUT_TVOUT; 1799 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1800 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1800 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1801 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1801 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1802 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1802 intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1803 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1803 intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1804 intel_output->dev_priv = tv_priv; 1804 intel_encoder->dev_priv = tv_priv;
1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown; 1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
1806 1806
1807 /* BIOS margin values */ 1807 /* BIOS margin values */
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev)
1812 1812
1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); 1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
1814 1814
1815 drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); 1815 drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1817 connector->interlace_allowed = false; 1817 connector->interlace_allowed = false;
1818 connector->doublescan_allowed = false; 1818 connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index bcec2d79636e..1d569830ed99 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
908 uint8_t attr = U8((*ptr)++), shift; 908 uint8_t attr = U8((*ptr)++), shift;
909 uint32_t saved, dst; 909 uint32_t saved, dst;
910 int dptr = *ptr; 910 int dptr = *ptr;
911 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
911 SDEBUG(" dst: "); 912 SDEBUG(" dst: ");
912 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 913 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
914 /* op needs to full dst value */
915 dst = saved;
913 shift = atom_get_src(ctx, attr, ptr); 916 shift = atom_get_src(ctx, attr, ptr);
914 SDEBUG(" shift: %d\n", shift); 917 SDEBUG(" shift: %d\n", shift);
915 dst <<= shift; 918 dst <<= shift;
919 dst &= atom_arg_mask[dst_align];
920 dst >>= atom_arg_shift[dst_align];
916 SDEBUG(" dst: "); 921 SDEBUG(" dst: ");
917 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 922 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
918} 923}
@@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
922 uint8_t attr = U8((*ptr)++), shift; 927 uint8_t attr = U8((*ptr)++), shift;
923 uint32_t saved, dst; 928 uint32_t saved, dst;
924 int dptr = *ptr; 929 int dptr = *ptr;
930 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
925 SDEBUG(" dst: "); 931 SDEBUG(" dst: ");
926 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 932 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
933 /* op needs to full dst value */
934 dst = saved;
927 shift = atom_get_src(ctx, attr, ptr); 935 shift = atom_get_src(ctx, attr, ptr);
928 SDEBUG(" shift: %d\n", shift); 936 SDEBUG(" shift: %d\n", shift);
929 dst >>= shift; 937 dst >>= shift;
938 dst &= atom_arg_mask[dst_align];
939 dst >>= atom_arg_shift[dst_align];
930 SDEBUG(" dst: "); 940 SDEBUG(" dst: ");
931 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 941 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
932} 942}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fd4ef6d18849..a87990b3ae84 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
523 adjusted_clock = mode->clock * 2; 523 adjusted_clock = mode->clock * 2;
524 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
525 pll->algo = PLL_ALGO_LEGACY;
526 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
527 }
524 } else { 528 } else {
525 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 529 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
526 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 530 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9580497ede4..d7388fdb6d0b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2891{ 2891{
2892 struct radeon_bo *robj; 2892 struct radeon_bo *robj;
2893 unsigned long size; 2893 unsigned long size;
2894 unsigned u, i, w, h; 2894 unsigned u, i, w, h, d;
2895 int ret; 2895 int ret;
2896 2896
2897 for (u = 0; u < track->num_texture; u++) { 2897 for (u = 0; u < track->num_texture; u++) {
@@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2923 h = h / (1 << i); 2923 h = h / (1 << i);
2924 if (track->textures[u].roundup_h) 2924 if (track->textures[u].roundup_h)
2925 h = roundup_pow_of_two(h); 2925 h = roundup_pow_of_two(h);
2926 if (track->textures[u].tex_coord_type == 1) {
2927 d = (1 << track->textures[u].txdepth) / (1 << i);
2928 if (!d)
2929 d = 1;
2930 } else {
2931 d = 1;
2932 }
2926 if (track->textures[u].compress_format) { 2933 if (track->textures[u].compress_format) {
2927 2934
2928 size += r100_track_compress_size(track->textures[u].compress_format, w, h); 2935 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2929 /* compressed textures are block based */ 2936 /* compressed textures are block based */
2930 } else 2937 } else
2931 size += w * h; 2938 size += w * h * d;
2932 } 2939 }
2933 size *= track->textures[u].cpp; 2940 size *= track->textures[u].cpp;
2934 2941
2935 switch (track->textures[u].tex_coord_type) { 2942 switch (track->textures[u].tex_coord_type) {
2936 case 0: 2943 case 0:
2937 break;
2938 case 1: 2944 case 1:
2939 size *= (1 << track->textures[u].txdepth);
2940 break; 2945 break;
2941 case 2: 2946 case 2:
2942 if (track->separate_cube) { 2947 if (track->separate_cube) {
@@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3007 } 3012 }
3008 } 3013 }
3009 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3014 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3010 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 3015 if (track->vap_vf_cntl & (1 << 14)) {
3016 nverts = track->vap_alt_nverts;
3017 } else {
3018 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3019 }
3011 switch (prim_walk) { 3020 switch (prim_walk) {
3012 case 1: 3021 case 1:
3013 for (i = 0; i < track->num_arrays; i++) { 3022 for (i = 0; i < track->num_arrays; i++) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index b27a6999d219..fadfe68de9cc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -64,6 +64,7 @@ struct r100_cs_track {
64 unsigned maxy; 64 unsigned maxy;
65 unsigned vtx_size; 65 unsigned vtx_size;
66 unsigned vap_vf_cntl; 66 unsigned vap_vf_cntl;
67 unsigned vap_alt_nverts;
67 unsigned immd_dwords; 68 unsigned immd_dwords;
68 unsigned num_arrays; 69 unsigned num_arrays;
69 unsigned max_indx; 70 unsigned max_indx;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2b9affe754ce..bd75f99bd65e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -730,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
730 /* VAP_VF_MAX_VTX_INDX */ 730 /* VAP_VF_MAX_VTX_INDX */
731 track->max_indx = idx_value & 0x00FFFFFFUL; 731 track->max_indx = idx_value & 0x00FFFFFFUL;
732 break; 732 break;
733 case 0x2088:
734 /* VAP_ALT_NUM_VERTICES - only valid on r500 */
735 if (p->rdev->family < CHIP_RV515)
736 goto fail;
737 track->vap_alt_nverts = idx_value & 0xFFFFFF;
738 break;
733 case 0x43E4: 739 case 0x43E4:
734 /* SC_SCISSOR1 */ 740 /* SC_SCISSOR1 */
735 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; 741 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -767,7 +773,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
767 tmp = idx_value & ~(0x7 << 16); 773 tmp = idx_value & ~(0x7 << 16);
768 tmp |= tile_flags; 774 tmp |= tile_flags;
769 ib[idx] = tmp; 775 ib[idx] = tmp;
770
771 i = (reg - 0x4E38) >> 2; 776 i = (reg - 0x4E38) >> 2;
772 track->cb[i].pitch = idx_value & 0x3FFE; 777 track->cb[i].pitch = idx_value & 0x3FFE;
773 switch (((idx_value >> 21) & 0xF)) { 778 switch (((idx_value >> 21) & 0xF)) {
@@ -1052,11 +1057,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1052 break; 1057 break;
1053 /* fallthrough do not move */ 1058 /* fallthrough do not move */
1054 default: 1059 default:
1055 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1060 goto fail;
1056 reg, idx);
1057 return -EINVAL;
1058 } 1061 }
1059 return 0; 1062 return 0;
1063fail:
1064 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1065 reg, idx);
1066 return -EINVAL;
1060} 1067}
1061 1068
1062static int r300_packet3_check(struct radeon_cs_parser *p, 1069static int r300_packet3_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index dac7042b797e..1d898051c631 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 029fa1406d1d..2616b822ba68 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
314 struct radeon_device *rdev = dev->dev_private; 314 struct radeon_device *rdev = dev->dev_private;
315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
316 316
317 if (ASIC_IS_DCE4(rdev))
318 return;
319
317 if (!offset) 320 if (!offset)
318 return; 321 return;
319 322
@@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
484 struct radeon_device *rdev = dev->dev_private; 487 struct radeon_device *rdev = dev->dev_private;
485 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
486 489
490 if (ASIC_IS_DCE4(rdev))
491 return;
492
487 if (!radeon_encoder->hdmi_offset) { 493 if (!radeon_encoder->hdmi_offset) {
488 r600_hdmi_assign_block(encoder); 494 r600_hdmi_assign_block(encoder);
489 if (!radeon_encoder->hdmi_offset) { 495 if (!radeon_encoder->hdmi_offset) {
@@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
525 struct radeon_device *rdev = dev->dev_private; 531 struct radeon_device *rdev = dev->dev_private;
526 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 532 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
527 533
534 if (ASIC_IS_DCE4(rdev))
535 return;
536
528 if (!radeon_encoder->hdmi_offset) { 537 if (!radeon_encoder->hdmi_offset) {
529 dev_err(rdev->dev, "Disabling not enabled HDMI\n"); 538 dev_err(rdev->dev, "Disabling not enabled HDMI\n");
530 return; 539 return;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3fba50540f72..1331351c5178 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
162{ 162{
163 struct drm_device *dev = connector->dev; 163 struct drm_device *dev = connector->dev;
164 struct drm_connector *conflict; 164 struct drm_connector *conflict;
165 struct radeon_connector *radeon_conflict;
165 int i; 166 int i;
166 167
167 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { 168 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
168 if (conflict == connector) 169 if (conflict == connector)
169 continue; 170 continue;
170 171
172 radeon_conflict = to_radeon_connector(conflict);
171 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 173 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
172 if (conflict->encoder_ids[i] == 0) 174 if (conflict->encoder_ids[i] == 0)
173 break; 175 break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
177 if (conflict->status != connector_status_connected) 179 if (conflict->status != connector_status_connected)
178 continue; 180 continue;
179 181
182 if (radeon_conflict->use_digital)
183 continue;
184
180 if (priority == true) { 185 if (priority == true) {
181 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
182 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
287 292
288 if (property == rdev->mode_info.coherent_mode_property) { 293 if (property == rdev->mode_info.coherent_mode_property) {
289 struct radeon_encoder_atom_dig *dig; 294 struct radeon_encoder_atom_dig *dig;
295 bool new_coherent_mode;
290 296
291 /* need to find digital encoder on connector */ 297 /* need to find digital encoder on connector */
292 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 298 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
299 return 0; 305 return 0;
300 306
301 dig = radeon_encoder->enc_priv; 307 dig = radeon_encoder->enc_priv;
302 dig->coherent_mode = val ? true : false; 308 new_coherent_mode = val ? true : false;
303 radeon_property_change_mode(&radeon_encoder->base); 309 if (dig->coherent_mode != new_coherent_mode) {
310 dig->coherent_mode = new_coherent_mode;
311 radeon_property_change_mode(&radeon_encoder->base);
312 }
304 } 313 }
305 314
306 if (property == rdev->mode_info.tv_std_property) { 315 if (property == rdev->mode_info.tv_std_property) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bddf17f97da8..7b629e305560 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -36,6 +36,54 @@
36#include "radeon.h" 36#include "radeon.h"
37#include "atom.h" 37#include "atom.h"
38 38
39static const char radeon_family_name[][16] = {
40 "R100",
41 "RV100",
42 "RS100",
43 "RV200",
44 "RS200",
45 "R200",
46 "RV250",
47 "RS300",
48 "RV280",
49 "R300",
50 "R350",
51 "RV350",
52 "RV380",
53 "R420",
54 "R423",
55 "RV410",
56 "RS400",
57 "RS480",
58 "RS600",
59 "RS690",
60 "RS740",
61 "RV515",
62 "R520",
63 "RV530",
64 "RV560",
65 "RV570",
66 "R580",
67 "R600",
68 "RV610",
69 "RV630",
70 "RV670",
71 "RV620",
72 "RV635",
73 "RS780",
74 "RS880",
75 "RV770",
76 "RV730",
77 "RV710",
78 "RV740",
79 "CEDAR",
80 "REDWOOD",
81 "JUNIPER",
82 "CYPRESS",
83 "HEMLOCK",
84 "LAST",
85};
86
39/* 87/*
40 * Clear GPU surface registers. 88 * Clear GPU surface registers.
41 */ 89 */
@@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev,
526 int r; 574 int r;
527 int dma_bits; 575 int dma_bits;
528 576
529 DRM_INFO("radeon: Initializing kernel modesetting.\n");
530 rdev->shutdown = false; 577 rdev->shutdown = false;
531 rdev->dev = &pdev->dev; 578 rdev->dev = &pdev->dev;
532 rdev->ddev = ddev; 579 rdev->ddev = ddev;
@@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev,
538 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 585 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
539 rdev->gpu_lockup = false; 586 rdev->gpu_lockup = false;
540 rdev->accel_working = false; 587 rdev->accel_working = false;
588
589 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
590 radeon_family_name[rdev->family], pdev->vendor, pdev->device);
591
541 /* mutex initialization are all done here so we 592 /* mutex initialization are all done here so we
542 * can recall function without having locking issues */ 593 * can recall function without having locking issues */
543 mutex_init(&rdev->cs_mutex); 594 mutex_init(&rdev->cs_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 055a51732dcb..4b05563d99e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -43,9 +43,10 @@
43 * - 2.0.0 - initial interface 43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface 44 * - 2.1.0 - add square tiling interface
45 * - 2.2.0 - add r6xx/r7xx const buffer support 45 * - 2.2.0 - add r6xx/r7xx const buffer support
46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
46 */ 47 */
47#define KMS_DRIVER_MAJOR 2 48#define KMS_DRIVER_MAJOR 2
48#define KMS_DRIVER_MINOR 2 49#define KMS_DRIVER_MINOR 3
49#define KMS_DRIVER_PATCHLEVEL 0 50#define KMS_DRIVER_PATCHLEVEL 0
50int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 51int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
51int radeon_driver_unload_kms(struct drm_device *dev); 52int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c52fc3080b67..30293bec0801 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -865,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
865 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 865 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
866 if (dig->coherent_mode) 866 if (dig->coherent_mode)
867 args.v3.acConfig.fCoherentMode = 1; 867 args.v3.acConfig.fCoherentMode = 1;
868 if (radeon_encoder->pixel_clock > 165000)
869 args.v3.acConfig.fDualLinkConnector = 1;
868 } 870 }
869 } else if (ASIC_IS_DCE32(rdev)) { 871 } else if (ASIC_IS_DCE32(rdev)) {
870 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 872 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
@@ -888,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
888 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 890 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
889 if (dig->coherent_mode) 891 if (dig->coherent_mode)
890 args.v2.acConfig.fCoherentMode = 1; 892 args.v2.acConfig.fCoherentMode = 1;
893 if (radeon_encoder->pixel_clock > 165000)
894 args.v2.acConfig.fDualLinkConnector = 1;
891 } 895 }
892 } else { 896 } else {
893 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 897 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
@@ -1373,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1373 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1377 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1374 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 1378 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1375 atombios_dac_setup(encoder, ATOM_ENABLE); 1379 atombios_dac_setup(encoder, ATOM_ENABLE);
1376 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) 1380 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
1377 atombios_tv_setup(encoder, ATOM_ENABLE); 1381 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1382 atombios_tv_setup(encoder, ATOM_ENABLE);
1383 else
1384 atombios_tv_setup(encoder, ATOM_DISABLE);
1385 }
1378 break; 1386 break;
1379 } 1387 }
1380 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1388 atombios_apply_encoder_quirks(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 93c7d5d41914..e329066dcabd 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -36,7 +36,7 @@
36 * Radeon chip families 36 * Radeon chip families
37 */ 37 */
38enum radeon_family { 38enum radeon_family {
39 CHIP_R100, 39 CHIP_R100 = 0,
40 CHIP_RV100, 40 CHIP_RV100,
41 CHIP_RS100, 41 CHIP_RS100,
42 CHIP_RV200, 42 CHIP_RV200,
@@ -99,4 +99,5 @@ enum radeon_chip_flags {
99 RADEON_IS_PCI = 0x00800000UL, 99 RADEON_IS_PCI = 0x00800000UL,
100 RADEON_IS_IGPGART = 0x01000000UL, 100 RADEON_IS_IGPGART = 0x01000000UL,
101}; 101};
102
102#endif 103#endif
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 19c4663fa9c6..1e97b2d129fd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -125,6 +125,8 @@ r300 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 989f7a020832..e958980d00f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -125,6 +125,8 @@ r420 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 6801b865d1c4..83e8bc0c2bb2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -125,6 +125,8 @@ rs600 0x6d40
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 38abf63bf2cd..1e46233985eb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -35,6 +35,7 @@ rv515 0x6d40
350x1DA8 VAP_VPORT_ZSCALE 350x1DA8 VAP_VPORT_ZSCALE
360x1DAC VAP_VPORT_ZOFFSET 360x1DAC VAP_VPORT_ZOFFSET
370x2080 VAP_CNTL 370x2080 VAP_CNTL
380x208C VAP_INDEX_OFFSET
380x2090 VAP_OUT_VTX_FMT_0 390x2090 VAP_OUT_VTX_FMT_0
390x2094 VAP_OUT_VTX_FMT_1 400x2094 VAP_OUT_VTX_FMT_1
400x20B0 VAP_VTE_CNTL 410x20B0 VAP_VTE_CNTL
@@ -158,6 +159,8 @@ rv515 0x6d40
1580x4000 GB_VAP_RASTER_VTX_FMT_0 1590x4000 GB_VAP_RASTER_VTX_FMT_0
1590x4004 GB_VAP_RASTER_VTX_FMT_1 1600x4004 GB_VAP_RASTER_VTX_FMT_1
1600x4008 GB_ENABLE 1610x4008 GB_ENABLE
1620x4010 GB_MSPOS0
1630x4014 GB_MSPOS1
1610x401C GB_SELECT 1640x401C GB_SELECT
1620x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1630x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index abf824c2123d..a81bc7a21e14 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
159 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 159 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
160 160
161 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 161 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
162 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); 162 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
163 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 163 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
164 164
165 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 165 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c1605b528e8f..0f28d91f29d8 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = {
142 "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", 142 "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
143 "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", 143 "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
144 NULL }, 144 NULL },
145/* Set 17: iMac 9,1 */
146 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
147 "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
148/* Set 18: MacBook Pro 2,2 */
149 { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
150 "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
145}; 151};
146 152
147/* List of keys used to read/write fan speeds */ 153/* List of keys used to read/write fan speeds */
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1350 { .accelerometer = 1, .light = 1, .temperature_set = 15 }, 1356 { .accelerometer = 1, .light = 1, .temperature_set = 15 },
1351/* MacPro3,1: temperature set 16 */ 1357/* MacPro3,1: temperature set 16 */
1352 { .accelerometer = 0, .light = 0, .temperature_set = 16 }, 1358 { .accelerometer = 0, .light = 0, .temperature_set = 16 },
1359/* iMac 9,1: light sensor only, temperature set 17 */
1360 { .accelerometer = 0, .light = 0, .temperature_set = 17 },
1361/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
1362 { .accelerometer = 1, .light = 1, .temperature_set = 18 },
1353}; 1363};
1354 1364
1355/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1365/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1375 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1385 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1376 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, 1386 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
1377 &applesmc_dmi_data[9]}, 1387 &applesmc_dmi_data[9]},
1388 { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
1389 DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
1390 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
1391 &applesmc_dmi_data[18]},
1378 { applesmc_dmi_match, "Apple MacBook Pro", { 1392 { applesmc_dmi_match, "Apple MacBook Pro", {
1379 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1393 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1380 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, 1394 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1415 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1429 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1416 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, 1430 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
1417 &applesmc_dmi_data[4]}, 1431 &applesmc_dmi_data[4]},
1432 { applesmc_dmi_match, "Apple iMac 9,1", {
1433 DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
1434 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
1435 &applesmc_dmi_data[17]},
1418 { applesmc_dmi_match, "Apple iMac 8", { 1436 { applesmc_dmi_match, "Apple iMac 8", {
1419 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1437 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1420 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, 1438 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1002befd87d5..5be09c048c5f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
539 539
540 struct it87_data *data = dev_get_drvdata(dev); 540 struct it87_data *data = dev_get_drvdata(dev);
541 long val; 541 long val;
542 u8 reg;
542 543
543 if (strict_strtol(buf, 10, &val) < 0) 544 if (strict_strtol(buf, 10, &val) < 0)
544 return -EINVAL; 545 return -EINVAL;
545 546
546 mutex_lock(&data->update_lock); 547 reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
547 548 reg &= ~(1 << nr);
548 data->sensor &= ~(1 << nr); 549 reg &= ~(8 << nr);
549 data->sensor &= ~(8 << nr);
550 if (val == 2) { /* backwards compatibility */ 550 if (val == 2) { /* backwards compatibility */
551 dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " 551 dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
552 "instead\n"); 552 "instead\n");
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
554 } 554 }
555 /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ 555 /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
556 if (val == 3) 556 if (val == 3)
557 data->sensor |= 1 << nr; 557 reg |= 1 << nr;
558 else if (val == 4) 558 else if (val == 4)
559 data->sensor |= 8 << nr; 559 reg |= 8 << nr;
560 else if (val != 0) { 560 else if (val != 0)
561 mutex_unlock(&data->update_lock);
562 return -EINVAL; 561 return -EINVAL;
563 } 562
563 mutex_lock(&data->update_lock);
564 data->sensor = reg;
564 it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); 565 it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
566 data->valid = 0; /* Force cache refresh */
565 mutex_unlock(&data->update_lock); 567 mutex_unlock(&data->update_lock);
566 return count; 568 return count;
567} 569}
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev)
1841 it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); 1843 it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
1842 } 1844 }
1843 1845
1844 /* Check if temperature channels are reset manually or by some reason */ 1846 /* Temperature channels are not forcibly enabled, as they can be
1845 tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); 1847 * set to two different sensor types and we can't guess which one
1846 if ((tmp & 0x3f) == 0) { 1848 * is correct for a given system. These channels can be enabled at
1847 /* Temp1,Temp3=thermistor; Temp2=thermal diode */ 1849 * run-time through the temp{1-3}_type sysfs accessors if needed. */
1848 tmp = (tmp & 0xc0) | 0x2a;
1849 it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
1850 }
1851 data->sensor = tmp;
1852 1850
1853 /* Check if voltage monitors are reset manually or by some reason */ 1851 /* Check if voltage monitors are reset manually or by some reason */
1854 tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); 1852 tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6b2d8ae64fe1..a610e7880fb3 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -303,13 +303,13 @@ error_ret:
303 **/ 303 **/
304static inline int sht15_calc_temp(struct sht15_data *data) 304static inline int sht15_calc_temp(struct sht15_data *data)
305{ 305{
306 int d1 = 0; 306 int d1 = temppoints[0].d1;
307 int i; 307 int i;
308 308
309 for (i = 1; i < ARRAY_SIZE(temppoints); i++) 309 for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
310 /* Find pointer to interpolate */ 310 /* Find pointer to interpolate */
311 if (data->supply_uV > temppoints[i - 1].vdd) { 311 if (data->supply_uV > temppoints[i - 1].vdd) {
312 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) 312 d1 = (data->supply_uV - temppoints[i - 1].vdd)
313 * (temppoints[i].d1 - temppoints[i - 1].d1) 313 * (temppoints[i].d1 - temppoints[i - 1].d1)
314 / (temppoints[i].vdd - temppoints[i - 1].vdd) 314 / (temppoints[i].vdd - temppoints[i - 1].vdd)
315 + temppoints[i - 1].d1; 315 + temppoints[i - 1].d1;
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
542/* If a regulator is available, query what the supply voltage actually is!*/ 542/* If a regulator is available, query what the supply voltage actually is!*/
543 data->reg = regulator_get(data->dev, "vcc"); 543 data->reg = regulator_get(data->dev, "vcc");
544 if (!IS_ERR(data->reg)) { 544 if (!IS_ERR(data->reg)) {
545 data->supply_uV = regulator_get_voltage(data->reg); 545 int voltage;
546
547 voltage = regulator_get_voltage(data->reg);
548 if (voltage)
549 data->supply_uV = voltage;
550
546 regulator_enable(data->reg); 551 regulator_enable(data->reg);
547 /* setup a notifier block to update this if another device 552 /* setup a notifier block to update this if another device
548 * causes the voltage to change */ 553 * causes the voltage to change */
diff --git a/drivers/input/input.c b/drivers/input/input.c
index afd4e2b7658c..9c79bd56b51a 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev,
660int input_get_keycode(struct input_dev *dev, 660int input_get_keycode(struct input_dev *dev,
661 unsigned int scancode, unsigned int *keycode) 661 unsigned int scancode, unsigned int *keycode)
662{ 662{
663 return dev->getkeycode(dev, scancode, keycode); 663 unsigned long flags;
664 int retval;
665
666 spin_lock_irqsave(&dev->event_lock, flags);
667 retval = dev->getkeycode(dev, scancode, keycode);
668 spin_unlock_irqrestore(&dev->event_lock, flags);
669
670 return retval;
664} 671}
665EXPORT_SYMBOL(input_get_keycode); 672EXPORT_SYMBOL(input_get_keycode);
666 673
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index ffc25cfcef7a..b443e088fd3c 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
374 input_dev->name = pdev->name; 374 input_dev->name = pdev->name;
375 input_dev->id.bustype = BUS_HOST; 375 input_dev->id.bustype = BUS_HOST;
376 input_dev->dev.parent = &pdev->dev; 376 input_dev->dev.parent = &pdev->dev;
377 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 377 input_dev->evbit[0] = BIT_MASK(EV_KEY);
378 if (!pdata->no_autorepeat)
379 input_dev->evbit[0] |= BIT_MASK(EV_REP);
378 input_dev->open = matrix_keypad_start; 380 input_dev->open = matrix_keypad_start;
379 input_dev->close = matrix_keypad_stop; 381 input_dev->close = matrix_keypad_stop;
380 382
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 99d58764ef03..0d22cb9ce42e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = {
64 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, 64 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
65 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 65 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
66 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 66 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
67 { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */
67 { { 0x52, 0x01, 0x14 }, 0xff, 0xff, 68 { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
68 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 69 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
69}; 70};
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 4f8fe0886b2a..b89879bd860f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = {
803 .disconnect = bcm5974_disconnect, 803 .disconnect = bcm5974_disconnect,
804 .suspend = bcm5974_suspend, 804 .suspend = bcm5974_suspend,
805 .resume = bcm5974_resume, 805 .resume = bcm5974_resume,
806 .reset_resume = bcm5974_resume,
807 .id_table = bcm5974_table, 806 .id_table = bcm5974_table,
808 .supports_autosuspend = 1, 807 .supports_autosuspend = 1,
809}; 808};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 577688b5b951..6440a8f55686 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
39 39
40static bool i8042_nomux; 40static bool i8042_nomux;
41module_param_named(nomux, i8042_nomux, bool, 0); 41module_param_named(nomux, i8042_nomux, bool, 0);
42MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); 42MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
43 43
44static bool i8042_unlock; 44static bool i8042_unlock;
45module_param_named(unlock, i8042_unlock, bool, 0); 45module_param_named(unlock, i8042_unlock, bool, 0);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 82ae18d29685..014248344763 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev,
68 unsigned int scancode, 68 unsigned int scancode,
69 unsigned int *keycode) 69 unsigned int *keycode)
70{ 70{
71 const struct key_entry *key = 71 const struct key_entry *key;
72 sparse_keymap_entry_from_scancode(dev, scancode);
73 72
74 if (key && key->type == KE_KEY) { 73 if (dev->keycode) {
75 *keycode = key->keycode; 74 key = sparse_keymap_entry_from_scancode(dev, scancode);
76 return 0; 75 if (key && key->type == KE_KEY) {
76 *keycode = key->keycode;
77 return 0;
78 }
77 } 79 }
78 80
79 return -EINVAL; 81 return -EINVAL;
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev,
86 struct key_entry *key; 88 struct key_entry *key;
87 int old_keycode; 89 int old_keycode;
88 90
89 if (keycode < 0 || keycode > KEY_MAX) 91 if (dev->keycode) {
90 return -EINVAL; 92 key = sparse_keymap_entry_from_scancode(dev, scancode);
91 93 if (key && key->type == KE_KEY) {
92 key = sparse_keymap_entry_from_scancode(dev, scancode); 94 old_keycode = key->keycode;
93 if (key && key->type == KE_KEY) { 95 key->keycode = keycode;
94 old_keycode = key->keycode; 96 set_bit(keycode, dev->keybit);
95 key->keycode = keycode; 97 if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
96 set_bit(keycode, dev->keybit); 98 clear_bit(old_keycode, dev->keybit);
97 if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) 99 return 0;
98 clear_bit(old_keycode, dev->keybit); 100 }
99 return 0;
100 } 101 }
101 102
102 return -EINVAL; 103 return -EINVAL;
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev,
164 return 0; 165 return 0;
165 166
166 err_out: 167 err_out:
167 kfree(keymap); 168 kfree(map);
168 return error; 169 return error;
169 170
170} 171}
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup);
176 * 177 *
177 * This function is used to free memory allocated by sparse keymap 178 * This function is used to free memory allocated by sparse keymap
178 * in an input device that was set up by sparse_keymap_setup(). 179 * in an input device that was set up by sparse_keymap_setup().
 180 * NOTE: It is safe to call this function while input device is
181 * still registered (however the drivers should care not to try to
 182 * use freed keymap and thus have to shut off interrupts/polling
183 * before freeing the keymap).
179 */ 184 */
180void sparse_keymap_free(struct input_dev *dev) 185void sparse_keymap_free(struct input_dev *dev)
181{ 186{
187 unsigned long flags;
188
189 /*
190 * Take event lock to prevent racing with input_get_keycode()
191 * and input_set_keycode() if we are called while input device
192 * is still registered.
193 */
194 spin_lock_irqsave(&dev->event_lock, flags);
195
182 kfree(dev->keycode); 196 kfree(dev->keycode);
183 dev->keycode = NULL; 197 dev->keycode = NULL;
184 dev->keycodemax = 0; 198 dev->keycodemax = 0;
185 dev->getkeycode = NULL; 199
186 dev->setkeycode = NULL; 200 spin_unlock_irqrestore(&dev->event_lock, flags);
187} 201}
188EXPORT_SYMBOL(sparse_keymap_free); 202EXPORT_SYMBOL(sparse_keymap_free);
189 203
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 8b5d2873f0c4..f46502589e4e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf)
673 int rv; 673 int rv;
674 674
675 mutex_lock(&wacom->lock); 675 mutex_lock(&wacom->lock);
676 if (wacom->open) { 676
677 /* switch to wacom mode first */
678 wacom_query_tablet_data(intf, features);
679
680 if (wacom->open)
677 rv = usb_submit_urb(wacom->irq, GFP_NOIO); 681 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
678 /* switch to wacom mode if needed */ 682 else
679 if (!wacom_retrieve_hid_descriptor(intf, features))
680 wacom_query_tablet_data(intf, features);
681 } else
682 rv = 0; 683 rv = 0;
684
683 mutex_unlock(&wacom->lock); 685 mutex_unlock(&wacom->lock);
684 686
685 return rv; 687 return rv;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b3ba3437a2eb..4a852d815c68 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
155{ 155{
156 struct wacom_features *features = &wacom->features; 156 struct wacom_features *features = &wacom->features;
157 unsigned char *data = wacom->data; 157 unsigned char *data = wacom->data;
158 int x, y, prox; 158 int x, y, rw;
159 int rw = 0; 159 static int penData = 0;
160 int retval = 0;
161 160
162 if (data[0] != WACOM_REPORT_PENABLED) { 161 if (data[0] != WACOM_REPORT_PENABLED) {
163 dbg("wacom_graphire_irq: received unknown report #%d", data[0]); 162 dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
164 goto exit; 163 return 0;
165 } 164 }
166 165
167 prox = data[1] & 0x80; 166 if (data[1] & 0x80) {
168 if (prox || wacom->id[0]) { 167 /* in prox and not a pad data */
169 if (prox) { 168 penData = 1;
170 switch ((data[1] >> 5) & 3) { 169
170 switch ((data[1] >> 5) & 3) {
171 171
172 case 0: /* Pen */ 172 case 0: /* Pen */
173 wacom->tool[0] = BTN_TOOL_PEN; 173 wacom->tool[0] = BTN_TOOL_PEN;
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
181 181
182 case 2: /* Mouse with wheel */ 182 case 2: /* Mouse with wheel */
183 wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); 183 wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
184 if (features->type == WACOM_G4 || features->type == WACOM_MO) {
185 rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
186 wacom_report_rel(wcombo, REL_WHEEL, -rw);
187 } else
188 wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
184 /* fall through */ 189 /* fall through */
185 190
186 case 3: /* Mouse without wheel */ 191 case 3: /* Mouse without wheel */
187 wacom->tool[0] = BTN_TOOL_MOUSE; 192 wacom->tool[0] = BTN_TOOL_MOUSE;
188 wacom->id[0] = CURSOR_DEVICE_ID; 193 wacom->id[0] = CURSOR_DEVICE_ID;
194 wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
195 wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
196 if (features->type == WACOM_G4 || features->type == WACOM_MO)
197 wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
198 else
199 wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
189 break; 200 break;
190 }
191 } 201 }
192 x = wacom_le16_to_cpu(&data[2]); 202 x = wacom_le16_to_cpu(&data[2]);
193 y = wacom_le16_to_cpu(&data[4]); 203 y = wacom_le16_to_cpu(&data[4]);
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
198 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); 208 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
199 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); 209 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
200 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); 210 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
201 } else {
202 wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
203 wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
204 if (features->type == WACOM_G4 ||
205 features->type == WACOM_MO) {
206 wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
207 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
208 } else {
209 wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
210 rw = -(signed)data[6];
211 }
212 wacom_report_rel(wcombo, REL_WHEEL, rw);
213 } 211 }
214
215 if (!prox)
216 wacom->id[0] = 0;
217 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ 212 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
218 wacom_report_key(wcombo, wacom->tool[0], prox); 213 wacom_report_key(wcombo, wacom->tool[0], 1);
219 wacom_input_sync(wcombo); /* sync last event */ 214 } else if (wacom->id[0]) {
215 wacom_report_abs(wcombo, ABS_X, 0);
216 wacom_report_abs(wcombo, ABS_Y, 0);
217 if (wacom->tool[0] == BTN_TOOL_MOUSE) {
218 wacom_report_key(wcombo, BTN_LEFT, 0);
219 wacom_report_key(wcombo, BTN_RIGHT, 0);
220 wacom_report_abs(wcombo, ABS_DISTANCE, 0);
221 } else {
222 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
223 wacom_report_key(wcombo, BTN_TOUCH, 0);
224 wacom_report_key(wcombo, BTN_STYLUS, 0);
225 wacom_report_key(wcombo, BTN_STYLUS2, 0);
226 }
227 wacom->id[0] = 0;
228 wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
229 wacom_report_key(wcombo, wacom->tool[0], 0);
220 } 230 }
221 231
222 /* send pad data */ 232 /* send pad data */
223 switch (features->type) { 233 switch (features->type) {
224 case WACOM_G4: 234 case WACOM_G4:
225 prox = data[7] & 0xf8; 235 if (data[7] & 0xf8) {
226 if (prox || wacom->id[1]) { 236 if (penData) {
237 wacom_input_sync(wcombo); /* sync last event */
238 if (!wacom->id[0])
239 penData = 0;
240 }
227 wacom->id[1] = PAD_DEVICE_ID; 241 wacom->id[1] = PAD_DEVICE_ID;
228 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); 242 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
229 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); 243 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
231 wacom_report_rel(wcombo, REL_WHEEL, rw); 245 wacom_report_rel(wcombo, REL_WHEEL, rw);
232 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); 246 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
233 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 247 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
234 if (!prox) 248 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
235 wacom->id[1] = 0; 249 } else if (wacom->id[1]) {
236 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 250 if (penData) {
251 wacom_input_sync(wcombo); /* sync last event */
252 if (!wacom->id[0])
253 penData = 0;
254 }
255 wacom->id[1] = 0;
256 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
257 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
258 wacom_report_rel(wcombo, REL_WHEEL, 0);
259 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
260 wacom_report_abs(wcombo, ABS_MISC, 0);
237 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 261 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
238 } 262 }
239 retval = 1;
240 break; 263 break;
241 case WACOM_MO: 264 case WACOM_MO:
242 prox = (data[7] & 0xf8) || data[8]; 265 if ((data[7] & 0xf8) || (data[8] & 0xff)) {
243 if (prox || wacom->id[1]) { 266 if (penData) {
267 wacom_input_sync(wcombo); /* sync last event */
268 if (!wacom->id[0])
269 penData = 0;
270 }
244 wacom->id[1] = PAD_DEVICE_ID; 271 wacom->id[1] = PAD_DEVICE_ID;
245 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); 272 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
246 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); 273 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
248 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); 275 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
249 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); 276 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
250 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); 277 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
251 if (!prox)
252 wacom->id[1] = 0;
253 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 278 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
254 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 279 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
280 } else if (wacom->id[1]) {
281 if (penData) {
282 wacom_input_sync(wcombo); /* sync last event */
283 if (!wacom->id[0])
284 penData = 0;
285 }
286 wacom->id[1] = 0;
287 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
288 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
289 wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
290 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
291 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
292 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
293 wacom_report_abs(wcombo, ABS_MISC, 0);
294 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
255 } 295 }
256 retval = 1;
257 break; 296 break;
258 } 297 }
259exit: 298 return 1;
260 return retval;
261} 299}
262 300
263static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) 301static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
598static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) 636static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
599{ 637{
600 wacom_report_abs(wcombo, ABS_X, 638 wacom_report_abs(wcombo, ABS_X,
601 data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); 639 (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
602 wacom_report_abs(wcombo, ABS_Y, 640 wacom_report_abs(wcombo, ABS_Y,
603 data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); 641 (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
604 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); 642 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
605 wacom_report_key(wcombo, wacom->tool[idx], 1); 643 wacom_report_key(wcombo, wacom->tool[idx], 1);
606 if (idx) 644 if (idx)
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
744 782
745 touchInProx = 0; 783 touchInProx = 0;
746 784
747 if (!wacom->id[0]) { /* first in prox */ 785 if (prox) { /* in prox */
748 /* Going into proximity select tool */ 786 if (!wacom->id[0]) {
749 wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; 787 /* Going into proximity select tool */
750 if (wacom->tool[0] == BTN_TOOL_PEN) 788 wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
751 wacom->id[0] = STYLUS_DEVICE_ID; 789 if (wacom->tool[0] == BTN_TOOL_PEN)
752 else 790 wacom->id[0] = STYLUS_DEVICE_ID;
753 wacom->id[0] = ERASER_DEVICE_ID; 791 else
754 } 792 wacom->id[0] = ERASER_DEVICE_ID;
755 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); 793 }
756 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); 794 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
757 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); 795 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
758 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); 796 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
759 pressure = ((data[7] & 0x01) << 8) | data[6]; 797 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
760 if (pressure < 0) 798 pressure = ((data[7] & 0x01) << 8) | data[6];
761 pressure = features->pressure_max + pressure + 1; 799 if (pressure < 0)
762 wacom_report_abs(wcombo, ABS_PRESSURE, pressure); 800 pressure = features->pressure_max + pressure + 1;
763 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); 801 wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
764 if (!prox) { /* out-prox */ 802 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
803 } else {
804 wacom_report_abs(wcombo, ABS_X, 0);
805 wacom_report_abs(wcombo, ABS_Y, 0);
806 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
807 wacom_report_key(wcombo, BTN_STYLUS, 0);
808 wacom_report_key(wcombo, BTN_STYLUS2, 0);
809 wacom_report_key(wcombo, BTN_TOUCH, 0);
765 wacom->id[0] = 0; 810 wacom->id[0] = 0;
766 /* pen is out so touch can be enabled now */ 811 /* pen is out so touch can be enabled now */
767 touchInProx = 1; 812 touchInProx = 1;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 0be15c70c16d..47a5ffec55a3 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -14,11 +14,6 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/timer.h>
22#include <linux/usb.h> 17#include <linux/usb.h>
23#include <linux/module.h> 18#include <linux/module.h>
24#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index eb7e27105a82..964a55fb1486 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -12,8 +12,6 @@
12 */ 12 */
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/slab.h>
16#include <linux/ctype.h>
17#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
18#include <linux/seq_file.h> 16#include <linux/seq_file.h>
19#include <linux/isdn/capilli.h> 17#include <linux/isdn/capilli.h>
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 0b39b387c125..f6f45f221920 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -14,10 +14,8 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/ctype.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
20#include <linux/slab.h>
21 19
22/* Version Information */ 20/* Version Information */
23#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" 21#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 9ef5b0463fd5..05947f9c1849 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -20,11 +20,12 @@
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/sched.h>
23#include <linux/compiler.h> 24#include <linux/compiler.h>
24#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/ctype.h>
25#include <linux/slab.h> 27#include <linux/slab.h>
26#include <linux/spinlock.h> 28#include <linux/spinlock.h>
27#include <linux/usb.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/ppp_defs.h> 31#include <linux/ppp_defs.h>
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index c99fb9790a13..c22e5ace8276 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -15,7 +15,6 @@
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/isdnif.h> 17#include <linux/isdnif.h>
18#include <linux/slab.h>
19 18
20#define HW_HDR_LEN 2 /* Header size used to store ack info */ 19#define HW_HDR_LEN 2 /* Header size used to store ack info */
21 20
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index f0dc6c9cc283..c9f28dd40d5c 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -13,7 +13,6 @@
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/gigaset_dev.h> 15#include <linux/gigaset_dev.h>
16#include <linux/tty.h>
17#include <linux/tty_flip.h> 16#include <linux/tty_flip.h>
18 17
19/*** our ioctls ***/ 18/*** our ioctls ***/
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index b69f73a0668f..b943efbff44d 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/ctype.h>
18 17
19static ssize_t show_cidmode(struct device *dev, 18static ssize_t show_cidmode(struct device *dev,
20 struct device_attribute *attr, char *buf) 19 struct device_attribute *attr, char *buf)
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8b0afd203a07..e96c0586886c 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -11,13 +11,10 @@
11 */ 11 */
12 12
13#include "gigaset.h" 13#include "gigaset.h"
14
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/tty.h>
19#include <linux/completion.h> 17#include <linux/completion.h>
20#include <linux/slab.h>
21 18
22/* Version Information */ 19/* Version Information */
23#define DRIVER_AUTHOR "Tilman Schmidt" 20#define DRIVER_AUTHOR "Tilman Schmidt"
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 9430a2bbb523..76dbb20f3065 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -16,10 +16,6 @@
16 */ 16 */
17 17
18#include "gigaset.h" 18#include "gigaset.h"
19
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/usb.h> 19#include <linux/usb.h>
24#include <linux/module.h> 20#include <linux/module.h>
25#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index efcf1f9327e5..fd10d7c785d4 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -194,7 +194,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
194 if ( ( (ix25_pdata_t*) (cprot->proto_data) ) 194 if ( ( (ix25_pdata_t*) (cprot->proto_data) )
195 -> state == WAN_CONNECTED ){ 195 -> state == WAN_CONNECTED ){
196 if( skb_push(skb, 1)){ 196 if( skb_push(skb, 1)){
197 skb -> data[0]=0x00; 197 skb->data[0] = X25_IFACE_DATA;
198 skb->protocol = x25_type_trans(skb, cprot->net_dev); 198 skb->protocol = x25_type_trans(skb, cprot->net_dev);
199 netif_rx(skb); 199 netif_rx(skb);
200 return 0; 200 return 0;
@@ -224,7 +224,7 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
224 224
225 skb = dev_alloc_skb(1); 225 skb = dev_alloc_skb(1);
226 if( skb ){ 226 if( skb ){
227 *( skb_put(skb, 1) ) = 0x01; 227 *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
228 skb->protocol = x25_type_trans(skb, cprot->net_dev); 228 skb->protocol = x25_type_trans(skb, cprot->net_dev);
229 netif_rx(skb); 229 netif_rx(skb);
230 return 0; 230 return 0;
@@ -253,7 +253,7 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
253 *state_p = WAN_DISCONNECTED; 253 *state_p = WAN_DISCONNECTED;
254 skb = dev_alloc_skb(1); 254 skb = dev_alloc_skb(1);
255 if( skb ){ 255 if( skb ){
256 *( skb_put(skb, 1) ) = 0x02; 256 *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
257 skb->protocol = x25_type_trans(skb, cprot->net_dev); 257 skb->protocol = x25_type_trans(skb, cprot->net_dev);
258 netif_rx(skb); 258 netif_rx(skb);
259 return 0; 259 return 0;
@@ -272,9 +272,10 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
272 unsigned char firstbyte = skb->data[0]; 272 unsigned char firstbyte = skb->data[0];
273 enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state; 273 enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
274 int ret = 0; 274 int ret = 0;
275 IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state ); 275 IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
276 MY_DEVNAME(cprot->net_dev), firstbyte, *state);
276 switch ( firstbyte ){ 277 switch ( firstbyte ){
277 case 0x00: /* dl_data request */ 278 case X25_IFACE_DATA:
278 if( *state == WAN_CONNECTED ){ 279 if( *state == WAN_CONNECTED ){
279 skb_pull(skb, 1); 280 skb_pull(skb, 1);
280 cprot -> net_dev -> trans_start = jiffies; 281 cprot -> net_dev -> trans_start = jiffies;
@@ -285,7 +286,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
285 } 286 }
286 illegal_state_warn( *state, firstbyte ); 287 illegal_state_warn( *state, firstbyte );
287 break; 288 break;
288 case 0x01: /* dl_connect request */ 289 case X25_IFACE_CONNECT:
289 if( *state == WAN_DISCONNECTED ){ 290 if( *state == WAN_DISCONNECTED ){
290 *state = WAN_CONNECTING; 291 *state = WAN_CONNECTING;
291 ret = cprot -> dops -> connect_req(cprot); 292 ret = cprot -> dops -> connect_req(cprot);
@@ -298,7 +299,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
298 illegal_state_warn( *state, firstbyte ); 299 illegal_state_warn( *state, firstbyte );
299 } 300 }
300 break; 301 break;
301 case 0x02: /* dl_disconnect request */ 302 case X25_IFACE_DISCONNECT:
302 switch ( *state ){ 303 switch ( *state ){
303 case WAN_DISCONNECTED: 304 case WAN_DISCONNECTED:
304 /* Should not happen. However, give upper layer a 305 /* Should not happen. However, give upper layer a
@@ -318,7 +319,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
318 illegal_state_warn( *state, firstbyte ); 319 illegal_state_warn( *state, firstbyte );
319 } 320 }
320 break; 321 break;
321 case 0x03: /* changing lapb parameters requested */ 322 case X25_IFACE_PARAMS:
322 printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb" 323 printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
323 " options not yet supported\n"); 324 " options not yet supported\n");
324 break; 325 break;
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 07090f379c63..69c84a1d88ea 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status)
178 178
179 /* We set the status. */ 179 /* We set the status. */
180 to_lgdev(vdev)->desc->status = status; 180 to_lgdev(vdev)->desc->status = status;
181 kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); 181 hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
182} 182}
183 183
184static void lg_set_status(struct virtio_device *vdev, u8 status) 184static void lg_set_status(struct virtio_device *vdev, u8 status)
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq)
229 */ 229 */
230 struct lguest_vq_info *lvq = vq->priv; 230 struct lguest_vq_info *lvq = vq->priv;
231 231
232 kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); 232 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
233} 233}
234 234
235/* An extern declaration inside a C file is bad form. Don't do it. */ 235/* An extern declaration inside a C file is bad form. Don't do it. */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index fb2b7ef7868e..b4eb675a807e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu)
288 insn = lgread(cpu, physaddr, u8); 288 insn = lgread(cpu, physaddr, u8);
289 289
290 /* 290 /*
291 * Around 2.6.33, the kernel started using an emulation for the
292 * cmpxchg8b instruction in early boot on many configurations. This
293 * code isn't paravirtualized, and it tries to disable interrupts.
294 * Ignore it, which will Mostly Work.
295 */
296 if (insn == 0xfa) {
297 /* "cli", or Clear Interrupt Enable instruction. Skip it. */
298 cpu->regs->eip++;
299 return 1;
300 }
301
302 /*
291 * 0x66 is an "operand prefix". It means it's using the upper 16 bits 303 * 0x66 is an "operand prefix". It means it's using the upper 16 bits
292 * of the eax register. 304 * of the eax register.
293 */ 305 */
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1710b7..9e95afa49fbe 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
449 pr_debug("%s", version); 449 pr_debug("%s", version);
450 450
451 lp = netdev_priv(dev); 451 lp = netdev_priv(dev);
452 memset(lp, 0, sizeof(*lp));
453 spin_lock_init(&lp->lock); 452 spin_lock_init(&lp->lock);
454 lp->base = ioremap(dev->mem_start, RX_BUF_END); 453 lp->base = ioremap(dev->mem_start, RX_BUF_END);
455 if (!lp->base) { 454 if (!lp->base) {
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 8c70686d43a1..55d219e585fb 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
503 break; 503 break;
504 } 504 }
505 505
506 memset(pr, 0, sizeof(struct priv));
507 pr->slot = slot; 506 pr->slot = slot;
508 507
509 pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision, 508 pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d149624b4a44..d0cb372a0f0d 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1944 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", 1944 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
1945 __func__, rx_status, rx_size, cur_rx); 1945 __func__, rx_status, rx_size, cur_rx);
1946#if RTL8139_DEBUG > 2 1946#if RTL8139_DEBUG > 2
1947 print_dump_hex(KERN_DEBUG, "Frame contents: ", 1947 print_hex_dump(KERN_DEBUG, "Frame contents: ",
1948 DUMP_PREFIX_OFFSET, 16, 1, 1948 DUMP_PREFIX_OFFSET, 16, 1,
1949 &rx_ring[ring_offset], 70, true); 1949 &rx_ring[ring_offset], 70, true);
1950#endif 1950#endif
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dbd26f992158..b9e7618a1473 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1453,20 +1453,6 @@ config FORCEDETH
1453 To compile this driver as a module, choose M here. The module 1453 To compile this driver as a module, choose M here. The module
1454 will be called forcedeth. 1454 will be called forcedeth.
1455 1455
1456config FORCEDETH_NAPI
1457 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
1458 depends on FORCEDETH && EXPERIMENTAL
1459 help
1460 NAPI is a new driver API designed to reduce CPU and interrupt load
1461 when the driver is receiving lots of packets from the card. It is
1462 still somewhat experimental and thus not yet enabled by default.
1463
1464 If your estimated Rx load is 10kpps or more, or if the card will be
1465 deployed on potentially unfriendly networks (e.g. in a firewall),
1466 then say Y here.
1467
1468 If in doubt, say N.
1469
1470config CS89x0 1456config CS89x0
1471 tristate "CS89x0 support" 1457 tristate "CS89x0 support"
1472 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ 1458 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ebf80b983063..0a0512ae77da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/
273obj-$(CONFIG_USB_HSO) += usb/ 273obj-$(CONFIG_USB_HSO) += usb/
274obj-$(CONFIG_USB_USBNET) += usb/ 274obj-$(CONFIG_USB_USBNET) += usb/
275obj-$(CONFIG_USB_ZD1201) += usb/ 275obj-$(CONFIG_USB_ZD1201) += usb/
276obj-$(CONFIG_USB_IPHETH) += usb/
276 277
277obj-y += wireless/ 278obj-y += wireless/
278obj-$(CONFIG_NET_TULIP) += tulip/ 279obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 7413a87e40ff..6404704f7eac 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1472,7 +1472,6 @@ ks8695_probe(struct platform_device *pdev)
1472 1472
1473 /* Configure our private structure a little */ 1473 /* Configure our private structure a little */
1474 ksp = netdev_priv(ndev); 1474 ksp = netdev_priv(ndev);
1475 memset(ksp, 0, sizeof(struct ks8695_priv));
1476 1475
1477 ksp->dev = &pdev->dev; 1476 ksp->dev = &pdev->dev;
1478 ksp->ndev = ndev; 1477 ksp->ndev = ndev;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 9a8bdea4a8ec..f48ba80025a2 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1647,7 +1647,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
1647 if (!dev) 1647 if (!dev)
1648 return -ENOMEM; 1648 return -ENOMEM;
1649 priv = netdev_priv(dev); 1649 priv = netdev_priv(dev);
1650 memset(priv, 0, sizeof(*priv));
1651 1650
1652 ret = compute_hw_mtu(priv, dev->mtu); 1651 ret = compute_hw_mtu(priv, dev->mtu);
1653 if (ret) 1652 if (ret)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 53326fed6c81..667f4196dc29 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.0.8" 61#define DRV_MODULE_VERSION "2.0.9"
62#define DRV_MODULE_RELDATE "Feb 15, 2010" 62#define DRV_MODULE_RELDATE "April 27, 2010"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
651} 651}
652 652
653static void 653static void
654bnx2_netif_stop(struct bnx2 *bp) 654bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
655{ 655{
656 bnx2_cnic_stop(bp); 656 if (stop_cnic)
657 bnx2_cnic_stop(bp);
657 if (netif_running(bp->dev)) { 658 if (netif_running(bp->dev)) {
658 int i; 659 int i;
659 660
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
671} 672}
672 673
673static void 674static void
674bnx2_netif_start(struct bnx2 *bp) 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
675{ 676{
676 if (atomic_dec_and_test(&bp->intr_sem)) { 677 if (atomic_dec_and_test(&bp->intr_sem)) {
677 if (netif_running(bp->dev)) { 678 if (netif_running(bp->dev)) {
678 netif_tx_wake_all_queues(bp->dev); 679 netif_tx_wake_all_queues(bp->dev);
679 bnx2_napi_enable(bp); 680 bnx2_napi_enable(bp);
680 bnx2_enable_int(bp); 681 bnx2_enable_int(bp);
681 bnx2_cnic_start(bp); 682 if (start_cnic)
683 bnx2_cnic_start(bp);
682 } 684 }
683 } 685 }
684} 686}
@@ -2717,6 +2719,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2717 } 2719 }
2718 2720
2719 rx_buf->skb = skb; 2721 rx_buf->skb = skb;
2722 rx_buf->desc = (struct l2_fhdr *) skb->data;
2720 dma_unmap_addr_set(rx_buf, mapping, mapping); 2723 dma_unmap_addr_set(rx_buf, mapping, mapping);
2721 2724
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2725 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2939,6 +2942,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size; 2942 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2940 2943
2941 prod_rx_buf->skb = skb; 2944 prod_rx_buf->skb = skb;
2945 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2942 2946
2943 if (cons == prod) 2947 if (cons == prod)
2944 return; 2948 return;
@@ -3072,6 +3076,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; 3076 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr; 3077 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0; 3078 int rx_pkt = 0, pg_ring_used = 0;
3079 struct pci_dev *pdev = bp->pdev;
3075 3080
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi); 3081 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons; 3082 sw_cons = rxr->rx_cons;
@@ -3084,7 +3089,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3084 while (sw_cons != hw_cons) { 3089 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len; 3090 unsigned int len, hdr_len;
3086 u32 status; 3091 u32 status;
3087 struct sw_bd *rx_buf; 3092 struct sw_bd *rx_buf, *next_rx_buf;
3088 struct sk_buff *skb; 3093 struct sk_buff *skb;
3089 dma_addr_t dma_addr; 3094 dma_addr_t dma_addr;
3090 u16 vtag = 0; 3095 u16 vtag = 0;
@@ -3095,7 +3100,14 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3095 3100
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; 3101 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb; 3102 skb = rx_buf->skb;
3103 prefetchw(skb);
3098 3104
3105 if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
3106 next_rx_buf =
3107 &rxr->rx_buf_ring[
3108 RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3109 prefetch(next_rx_buf->desc);
3110 }
3099 rx_buf->skb = NULL; 3111 rx_buf->skb = NULL;
3100 3112
3101 dma_addr = dma_unmap_addr(rx_buf, mapping); 3113 dma_addr = dma_unmap_addr(rx_buf, mapping);
@@ -3104,7 +3116,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3116 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 PCI_DMA_FROMDEVICE); 3117 PCI_DMA_FROMDEVICE);
3106 3118
3107 rx_hdr = (struct l2_fhdr *) skb->data; 3119 rx_hdr = rx_buf->desc;
3108 len = rx_hdr->l2_fhdr_pkt_len; 3120 len = rx_hdr->l2_fhdr_pkt_len;
3109 status = rx_hdr->l2_fhdr_status; 3121 status = rx_hdr->l2_fhdr_status;
3110 3122
@@ -3205,10 +3217,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3205 3217
3206#ifdef BCM_VLAN 3218#ifdef BCM_VLAN
3207 if (hw_vlan) 3219 if (hw_vlan)
3208 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); 3220 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3209 else 3221 else
3210#endif 3222#endif
3211 netif_receive_skb(skb); 3223 napi_gro_receive(&bnapi->napi, skb);
3212 3224
3213 rx_pkt++; 3225 rx_pkt++;
3214 3226
@@ -4758,8 +4770,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4758 rc = bnx2_alloc_bad_rbuf(bp); 4770 rc = bnx2_alloc_bad_rbuf(bp);
4759 } 4771 }
4760 4772
4761 if (bp->flags & BNX2_FLAG_USING_MSIX) 4773 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4762 bnx2_setup_msix_tbl(bp); 4774 bnx2_setup_msix_tbl(bp);
4775 /* Prevent MSIX table reads and write from timing out */
4776 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4777 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4778 }
4763 4779
4764 return rc; 4780 return rc;
4765} 4781}
@@ -5758,7 +5774,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5758 rx_buf = &rxr->rx_buf_ring[rx_start_idx]; 5774 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5759 rx_skb = rx_buf->skb; 5775 rx_skb = rx_buf->skb;
5760 5776
5761 rx_hdr = (struct l2_fhdr *) rx_skb->data; 5777 rx_hdr = rx_buf->desc;
5762 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5778 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5763 5779
5764 pci_dma_sync_single_for_cpu(bp->pdev, 5780 pci_dma_sync_single_for_cpu(bp->pdev,
@@ -6272,12 +6288,12 @@ bnx2_reset_task(struct work_struct *work)
6272 return; 6288 return;
6273 } 6289 }
6274 6290
6275 bnx2_netif_stop(bp); 6291 bnx2_netif_stop(bp, true);
6276 6292
6277 bnx2_init_nic(bp, 1); 6293 bnx2_init_nic(bp, 1);
6278 6294
6279 atomic_set(&bp->intr_sem, 1); 6295 atomic_set(&bp->intr_sem, 1);
6280 bnx2_netif_start(bp); 6296 bnx2_netif_start(bp, true);
6281 rtnl_unlock(); 6297 rtnl_unlock();
6282} 6298}
6283 6299
@@ -6319,7 +6335,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6319 struct bnx2 *bp = netdev_priv(dev); 6335 struct bnx2 *bp = netdev_priv(dev);
6320 6336
6321 if (netif_running(dev)) 6337 if (netif_running(dev))
6322 bnx2_netif_stop(bp); 6338 bnx2_netif_stop(bp, false);
6323 6339
6324 bp->vlgrp = vlgrp; 6340 bp->vlgrp = vlgrp;
6325 6341
@@ -6330,7 +6346,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6330 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) 6346 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6331 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); 6347 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6332 6348
6333 bnx2_netif_start(bp); 6349 bnx2_netif_start(bp, false);
6334} 6350}
6335#endif 6351#endif
6336 6352
@@ -7050,9 +7066,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7050 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; 7066 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7051 7067
7052 if (netif_running(bp->dev)) { 7068 if (netif_running(bp->dev)) {
7053 bnx2_netif_stop(bp); 7069 bnx2_netif_stop(bp, true);
7054 bnx2_init_nic(bp, 0); 7070 bnx2_init_nic(bp, 0);
7055 bnx2_netif_start(bp); 7071 bnx2_netif_start(bp, true);
7056 } 7072 }
7057 7073
7058 return 0; 7074 return 0;
@@ -7082,7 +7098,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7082 /* Reset will erase chipset stats; save them */ 7098 /* Reset will erase chipset stats; save them */
7083 bnx2_save_stats(bp); 7099 bnx2_save_stats(bp);
7084 7100
7085 bnx2_netif_stop(bp); 7101 bnx2_netif_stop(bp, true);
7086 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7102 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7087 bnx2_free_skbs(bp); 7103 bnx2_free_skbs(bp);
7088 bnx2_free_mem(bp); 7104 bnx2_free_mem(bp);
@@ -7110,7 +7126,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7110 bnx2_setup_cnic_irq_info(bp); 7126 bnx2_setup_cnic_irq_info(bp);
7111 mutex_unlock(&bp->cnic_lock); 7127 mutex_unlock(&bp->cnic_lock);
7112#endif 7128#endif
7113 bnx2_netif_start(bp); 7129 bnx2_netif_start(bp, true);
7114 } 7130 }
7115 return 0; 7131 return 0;
7116} 7132}
@@ -7363,7 +7379,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7363 if (etest->flags & ETH_TEST_FL_OFFLINE) { 7379 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7364 int i; 7380 int i;
7365 7381
7366 bnx2_netif_stop(bp); 7382 bnx2_netif_stop(bp, true);
7367 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); 7383 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7368 bnx2_free_skbs(bp); 7384 bnx2_free_skbs(bp);
7369 7385
@@ -7382,7 +7398,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7382 bnx2_shutdown_chip(bp); 7398 bnx2_shutdown_chip(bp);
7383 else { 7399 else {
7384 bnx2_init_nic(bp, 1); 7400 bnx2_init_nic(bp, 1);
7385 bnx2_netif_start(bp); 7401 bnx2_netif_start(bp, true);
7386 } 7402 }
7387 7403
7388 /* wait for link up */ 7404 /* wait for link up */
@@ -8290,7 +8306,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8290 memcpy(dev->dev_addr, bp->mac_addr, 6); 8306 memcpy(dev->dev_addr, bp->mac_addr, 6);
8291 memcpy(dev->perm_addr, bp->mac_addr, 6); 8307 memcpy(dev->perm_addr, bp->mac_addr, 6);
8292 8308
8293 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 8309 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
8294 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8310 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8295 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8311 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8296 dev->features |= NETIF_F_IPV6_CSUM; 8312 dev->features |= NETIF_F_IPV6_CSUM;
@@ -8376,7 +8392,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8376 return 0; 8392 return 0;
8377 8393
8378 flush_scheduled_work(); 8394 flush_scheduled_work();
8379 bnx2_netif_stop(bp); 8395 bnx2_netif_stop(bp, true);
8380 netif_device_detach(dev); 8396 netif_device_detach(dev);
8381 del_timer_sync(&bp->timer); 8397 del_timer_sync(&bp->timer);
8382 bnx2_shutdown_chip(bp); 8398 bnx2_shutdown_chip(bp);
@@ -8398,7 +8414,7 @@ bnx2_resume(struct pci_dev *pdev)
8398 bnx2_set_power_state(bp, PCI_D0); 8414 bnx2_set_power_state(bp, PCI_D0);
8399 netif_device_attach(dev); 8415 netif_device_attach(dev);
8400 bnx2_init_nic(bp, 1); 8416 bnx2_init_nic(bp, 1);
8401 bnx2_netif_start(bp); 8417 bnx2_netif_start(bp, true);
8402 return 0; 8418 return 0;
8403} 8419}
8404 8420
@@ -8425,7 +8441,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8425 } 8441 }
8426 8442
8427 if (netif_running(dev)) { 8443 if (netif_running(dev)) {
8428 bnx2_netif_stop(bp); 8444 bnx2_netif_stop(bp, true);
8429 del_timer_sync(&bp->timer); 8445 del_timer_sync(&bp->timer);
8430 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 8446 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8431 } 8447 }
@@ -8482,7 +8498,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
8482 8498
8483 rtnl_lock(); 8499 rtnl_lock();
8484 if (netif_running(dev)) 8500 if (netif_running(dev))
8485 bnx2_netif_start(bp); 8501 bnx2_netif_start(bp, true);
8486 8502
8487 netif_device_attach(dev); 8503 netif_device_attach(dev);
8488 rtnl_unlock(); 8504 rtnl_unlock();
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index ab34a5d86f86..dd35bd0b7e05 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6551,6 +6551,7 @@ struct l2_fhdr {
6551 6551
6552struct sw_bd { 6552struct sw_bd {
6553 struct sk_buff *skb; 6553 struct sk_buff *skb;
6554 struct l2_fhdr *desc;
6554 DEFINE_DMA_UNMAP_ADDR(mapping); 6555 DEFINE_DMA_UNMAP_ADDR(mapping);
6555}; 6556};
6556 6557
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 081953005b84..8bd23687c530 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1330,7 +1330,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1330 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 1330 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1331 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 1331 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1332 1332
1333#define MULTI_FLAGS(bp) \ 1333#define RSS_FLAGS(bp) \
1334 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ 1334 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1335 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ 1335 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1336 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ 1336 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0c6dba24e37e..2bc35c794aec 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1582,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1582 struct sw_rx_bd *rx_buf = NULL; 1582 struct sw_rx_bd *rx_buf = NULL;
1583 struct sk_buff *skb; 1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe; 1584 union eth_rx_cqe *cqe;
1585 u8 cqe_fp_flags; 1585 u8 cqe_fp_flags, cqe_fp_status_flags;
1586 u16 len, pad; 1586 u16 len, pad;
1587 1587
1588 comp_ring_cons = RCQ_BD(sw_comp_cons); 1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1598,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1598 1598
1599 cqe = &fp->rx_comp_ring[comp_ring_cons]; 1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1601 1602
1602 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" 1603 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1603 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), 1604 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1616,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1616 rx_buf = &fp->rx_buf_ring[bd_cons]; 1617 rx_buf = &fp->rx_buf_ring[bd_cons];
1617 skb = rx_buf->skb; 1618 skb = rx_buf->skb;
1618 prefetch(skb); 1619 prefetch(skb);
1619 prefetch((u8 *)skb + 256);
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset; 1621 pad = cqe->fast_path_cqe.placement_offset;
1622 1622
@@ -1667,7 +1667,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1667 dma_unmap_addr(rx_buf, mapping), 1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH, 1668 pad + RX_COPY_THRESH,
1669 DMA_FROM_DEVICE); 1669 DMA_FROM_DEVICE);
1670 prefetch(skb);
1671 prefetch(((char *)(skb)) + 128); 1670 prefetch(((char *)(skb)) + 128);
1672 1671
1673 /* is this an error packet? */ 1672 /* is this an error packet? */
@@ -1727,6 +1726,12 @@ reuse_rx:
1727 1726
1728 skb->protocol = eth_type_trans(skb, bp->dev); 1727 skb->protocol = eth_type_trans(skb, bp->dev);
1729 1728
1729 if ((bp->dev->features & NETIF_F_RXHASH) &&
1730 (cqe_fp_status_flags &
1731 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732 skb->rxhash = le32_to_cpu(
1733 cqe->fast_path_cqe.rss_hash_result);
1734
1730 skb->ip_summed = CHECKSUM_NONE; 1735 skb->ip_summed = CHECKSUM_NONE;
1731 if (bp->rx_csum) { 1736 if (bp->rx_csum) {
1732 if (likely(BNX2X_RX_CSUM_OK(cqe))) 1737 if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -5750,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5750 u32 offset; 5755 u32 offset;
5751 u16 max_agg_size; 5756 u16 max_agg_size;
5752 5757
5753 if (is_multi(bp)) { 5758 tstorm_config.config_flags = RSS_FLAGS(bp);
5754 tstorm_config.config_flags = MULTI_FLAGS(bp); 5759
5760 if (is_multi(bp))
5755 tstorm_config.rss_result_mask = MULTI_MASK; 5761 tstorm_config.rss_result_mask = MULTI_MASK;
5756 }
5757 5762
5758 /* Enable TPA if needed */ 5763 /* Enable TPA if needed */
5759 if (bp->flags & TPA_ENABLE_FLAG) 5764 if (bp->flags & TPA_ENABLE_FLAG)
@@ -6629,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6629 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 6634 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6630 6635
6631 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6636 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6632 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { 6637 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6633 REG_WR(bp, i, 0xc0cac01a); 6638 REG_WR(bp, i, random32());
6634 /* TODO: replace with something meaningful */
6635 }
6636 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 6639 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6637#ifdef BCM_CNIC 6640#ifdef BCM_CNIC
6638 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6641 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -11001,6 +11004,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
11001 changed = 1; 11004 changed = 1;
11002 } 11005 }
11003 11006
11007 if (data & ETH_FLAG_RXHASH)
11008 dev->features |= NETIF_F_RXHASH;
11009 else
11010 dev->features &= ~NETIF_F_RXHASH;
11011
11004 if (changed && netif_running(dev)) { 11012 if (changed && netif_running(dev)) {
11005 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 11013 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11006 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 11014 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 85e813c7762b..5e12462a9d5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/netpoll.h>
62#include <linux/inetdevice.h> 63#include <linux/inetdevice.h>
63#include <linux/igmp.h> 64#include <linux/igmp.h>
64#include <linux/etherdevice.h> 65#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
430 } 431 }
431 432
432 skb->priority = 1; 433 skb->priority = 1;
433 dev_queue_xmit(skb); 434#ifdef CONFIG_NET_POLL_CONTROLLER
435 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
436 struct netpoll *np = bond->dev->npinfo->netpoll;
437 slave_dev->npinfo = bond->dev->npinfo;
438 np->real_dev = np->dev = skb->dev;
439 slave_dev->priv_flags |= IFF_IN_NETPOLL;
440 netpoll_send_skb(np, skb);
441 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
442 np->dev = bond->dev;
443 } else
444#endif
445 dev_queue_xmit(skb);
434 446
435 return 0; 447 return 0;
436} 448}
@@ -1256,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1256 bond->slave_cnt--; 1268 bond->slave_cnt--;
1257} 1269}
1258 1270
1271#ifdef CONFIG_NET_POLL_CONTROLLER
1272/*
1273 * You must hold read lock on bond->lock before calling this.
1274 */
1275static bool slaves_support_netpoll(struct net_device *bond_dev)
1276{
1277 struct bonding *bond = netdev_priv(bond_dev);
1278 struct slave *slave;
1279 int i = 0;
1280 bool ret = true;
1281
1282 bond_for_each_slave(bond, slave, i) {
1283 if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
1284 !slave->dev->netdev_ops->ndo_poll_controller)
1285 ret = false;
1286 }
1287 return i != 0 && ret;
1288}
1289
1290static void bond_poll_controller(struct net_device *bond_dev)
1291{
1292 struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
1293 if (dev != bond_dev)
1294 netpoll_poll_dev(dev);
1295}
1296
1297static void bond_netpoll_cleanup(struct net_device *bond_dev)
1298{
1299 struct bonding *bond = netdev_priv(bond_dev);
1300 struct slave *slave;
1301 const struct net_device_ops *ops;
1302 int i;
1303
1304 read_lock(&bond->lock);
1305 bond_dev->npinfo = NULL;
1306 bond_for_each_slave(bond, slave, i) {
1307 if (slave->dev) {
1308 ops = slave->dev->netdev_ops;
1309 if (ops->ndo_netpoll_cleanup)
1310 ops->ndo_netpoll_cleanup(slave->dev);
1311 else
1312 slave->dev->npinfo = NULL;
1313 }
1314 }
1315 read_unlock(&bond->lock);
1316}
1317
1318#else
1319
1320static void bond_netpoll_cleanup(struct net_device *bond_dev)
1321{
1322}
1323
1324#endif
1325
1259/*---------------------------------- IOCTL ----------------------------------*/ 1326/*---------------------------------- IOCTL ----------------------------------*/
1260 1327
1261static int bond_sethwaddr(struct net_device *bond_dev, 1328static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1674,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1674 1741
1675 bond_set_carrier(bond); 1742 bond_set_carrier(bond);
1676 1743
1744#ifdef CONFIG_NET_POLL_CONTROLLER
1745 if (slaves_support_netpoll(bond_dev)) {
1746 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1747 if (bond_dev->npinfo)
1748 slave_dev->npinfo = bond_dev->npinfo;
1749 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
1750 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1751 pr_info("New slave device %s does not support netpoll\n",
1752 slave_dev->name);
1753 pr_info("Disabling netpoll support for %s\n", bond_dev->name);
1754 }
1755#endif
1677 read_unlock(&bond->lock); 1756 read_unlock(&bond->lock);
1678 1757
1679 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1758 res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1740,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1740 return -EINVAL; 1819 return -EINVAL;
1741 } 1820 }
1742 1821
1822 netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
1743 write_lock_bh(&bond->lock); 1823 write_lock_bh(&bond->lock);
1744 1824
1745 slave = bond_get_slave_by_dev(bond, slave_dev); 1825 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1868,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1868 1948
1869 netdev_set_master(slave_dev, NULL); 1949 netdev_set_master(slave_dev, NULL);
1870 1950
1951#ifdef CONFIG_NET_POLL_CONTROLLER
1952 read_lock_bh(&bond->lock);
1953 if (slaves_support_netpoll(bond_dev))
1954 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1955 read_unlock_bh(&bond->lock);
1956 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
1957 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
1958 else
1959 slave_dev->npinfo = NULL;
1960#endif
1961
1871 /* close slave before restoring its mac address */ 1962 /* close slave before restoring its mac address */
1872 dev_close(slave_dev); 1963 dev_close(slave_dev);
1873 1964
@@ -4406,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
4406 .ndo_vlan_rx_register = bond_vlan_rx_register, 4497 .ndo_vlan_rx_register = bond_vlan_rx_register,
4407 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4498 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4408 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4499 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4500#ifdef CONFIG_NET_POLL_CONTROLLER
4501 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4502 .ndo_poll_controller = bond_poll_controller,
4503#endif
4409}; 4504};
4410 4505
4411static void bond_destructor(struct net_device *bond_dev) 4506static void bond_destructor(struct net_device *bond_dev)
@@ -4499,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
4499{ 4594{
4500 struct bonding *bond = netdev_priv(bond_dev); 4595 struct bonding *bond = netdev_priv(bond_dev);
4501 4596
4597 bond_netpoll_cleanup(bond_dev);
4598
4502 /* Release the bonded slaves */ 4599 /* Release the bonded slaves */
4503 bond_release_all(bond_dev); 4600 bond_release_all(bond_dev);
4504 4601
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 38c0186cfbc2..09257ca8f563 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -197,7 +197,8 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
197 197
198 /* Get a suitable caif packet and copy in data. */ 198 /* Get a suitable caif packet and copy in data. */
199 skb = netdev_alloc_skb(ser->dev, count+1); 199 skb = netdev_alloc_skb(ser->dev, count+1);
200 BUG_ON(skb == NULL); 200 if (skb == NULL)
201 return;
201 p = skb_put(skb, count); 202 p = skb_put(skb, count);
202 memcpy(p, data, count); 203 memcpy(p, data, count);
203 204
@@ -315,6 +316,8 @@ static int ldisc_open(struct tty_struct *tty)
315 /* No write no play */ 316 /* No write no play */
316 if (tty->ops->write == NULL) 317 if (tty->ops->write == NULL)
317 return -EOPNOTSUPP; 318 return -EOPNOTSUPP;
319 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
320 return -EPERM;
318 321
319 sprintf(name, "cf%s", tty->name); 322 sprintf(name, "cf%s", tty->name);
320 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); 323 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 33451092b8e8..d800b598ae3d 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1006 1006
1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); 1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1008 if (!netdev) { 1008 if (!netdev) {
1009 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1009 dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
1010 return -ENOMEM; 1010 return -ENOMEM;
1011 } 1011 }
1012 1012
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
1036 1036
1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); 1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1038 if (!dev->intr_urb) { 1038 if (!dev->intr_urb) {
1039 dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); 1039 dev_err(&intf->dev, "Couldn't alloc intr URB\n");
1040 goto cleanup_candev; 1040 goto cleanup_candev;
1041 } 1041 }
1042 1042
1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); 1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
1044 if (!dev->intr_in_buffer) { 1044 if (!dev->intr_in_buffer) {
1045 dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); 1045 dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
1046 goto cleanup_intr_urb; 1046 goto cleanup_intr_urb;
1047 } 1047 }
1048 1048
1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + 1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
1050 sizeof(struct ems_cpc_msg), GFP_KERNEL); 1050 sizeof(struct ems_cpc_msg), GFP_KERNEL);
1051 if (!dev->tx_msg_buffer) { 1051 if (!dev->tx_msg_buffer) {
1052 dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); 1052 dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
1053 goto cleanup_intr_in_buffer; 1053 goto cleanup_intr_in_buffer;
1054 } 1054 }
1055 1055
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e0b2f4..35cd36729155 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, 934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
935 int phy_addr, const struct mdio_ops *mdio_ops) 935 int phy_addr, const struct mdio_ops *mdio_ops)
936{ 936{
937 cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops, 937 cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, 938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
939 "10GBASE-CX4"); 939 "10GBASE-CX4");
940 return 0; 940 return 0;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aced6c5e635c..e3f1b8566495 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, 439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 unsigned long n) 440 unsigned long n)
441{ 441{
442 int attempts = 5; 442 int attempts = 10;
443 443
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { 444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 if (!--attempts) 445 if (!--attempts)
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b56..8856a75fcc94 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
53 53
54enum { 54enum {
55 MAX_NPORTS = 4, /* max # of ports */ 55 MAX_NPORTS = 4, /* max # of ports */
56 SERNUM_LEN = 16, /* Serial # length */ 56 SERNUM_LEN = 24, /* Serial # length */
57 EC_LEN = 16, /* E/C length */ 57 EC_LEN = 16, /* E/C length */
58 ID_LEN = 16, /* ID length */ 58 ID_LEN = 16, /* ID length */
59}; 59};
@@ -651,8 +651,6 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
651 struct link_config *lc); 651 struct link_config *lc);
652int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 652int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
653int t4_seeprom_wp(struct adapter *adapter, bool enable); 653int t4_seeprom_wp(struct adapter *adapter, bool enable);
654int t4_read_flash(struct adapter *adapter, unsigned int addr,
655 unsigned int nwords, u32 *data, int byte_oriented);
656int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 654int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
657int t4_check_fw_version(struct adapter *adapter); 655int t4_check_fw_version(struct adapter *adapter);
658int t4_prep_adapter(struct adapter *adapter); 656int t4_prep_adapter(struct adapter *adapter);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 5f582dba928f..1bad50041427 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -1711,6 +1711,18 @@ static int set_tso(struct net_device *dev, u32 value)
1711 return 0; 1711 return 0;
1712} 1712}
1713 1713
1714static int set_flags(struct net_device *dev, u32 flags)
1715{
1716 if (flags & ~ETH_FLAG_RXHASH)
1717 return -EOPNOTSUPP;
1718
1719 if (flags & ETH_FLAG_RXHASH)
1720 dev->features |= NETIF_F_RXHASH;
1721 else
1722 dev->features &= ~NETIF_F_RXHASH;
1723 return 0;
1724}
1725
1714static struct ethtool_ops cxgb_ethtool_ops = { 1726static struct ethtool_ops cxgb_ethtool_ops = {
1715 .get_settings = get_settings, 1727 .get_settings = get_settings,
1716 .set_settings = set_settings, 1728 .set_settings = set_settings,
@@ -1741,6 +1753,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
1741 .get_wol = get_wol, 1753 .get_wol = get_wol,
1742 .set_wol = set_wol, 1754 .set_wol = set_wol,
1743 .set_tso = set_tso, 1755 .set_tso = set_tso,
1756 .set_flags = set_flags,
1744 .flash_device = set_flash, 1757 .flash_device = set_flash,
1745}; 1758};
1746 1759
@@ -3203,7 +3216,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3203 3216
3204 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; 3217 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
3205 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3218 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3206 netdev->features |= NETIF_F_GRO | highdma; 3219 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3207 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3220 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3208 netdev->vlan_features = netdev->features & VLAN_FEAT; 3221 netdev->vlan_features = netdev->features & VLAN_FEAT;
3209 3222
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c3..d1f8f225e45a 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1471 * Releases the pages of a packet gather list. We do not own the last 1471 * Releases the pages of a packet gather list. We do not own the last
1472 * page on the list and do not free it. 1472 * page on the list and do not free it.
1473 */ 1473 */
1474void t4_pktgl_free(const struct pkt_gl *gl) 1474static void t4_pktgl_free(const struct pkt_gl *gl)
1475{ 1475{
1476 int n; 1476 int n;
1477 const skb_frag_t *p; 1477 const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1524 skb->truesize += skb->data_len; 1524 skb->truesize += skb->data_len;
1525 skb->ip_summed = CHECKSUM_UNNECESSARY; 1525 skb->ip_summed = CHECKSUM_UNNECESSARY;
1526 skb_record_rx_queue(skb, rxq->rspq.idx); 1526 skb_record_rx_queue(skb, rxq->rspq.idx);
1527 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1528 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1527 1529
1528 if (unlikely(pkt->vlan_ex)) { 1530 if (unlikely(pkt->vlan_ex)) {
1529 struct port_info *pi = netdev_priv(rxq->rspq.netdev); 1531 struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1565 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1567 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1566 return handle_trace_pkt(q->adap, si); 1568 return handle_trace_pkt(q->adap, si);
1567 1569
1568 pkt = (void *)&rsp[1]; 1570 pkt = (const struct cpl_rx_pkt *)rsp;
1569 csum_ok = pkt->csum_calc && !pkt->err_vec; 1571 csum_ok = pkt->csum_calc && !pkt->err_vec;
1570 if ((pkt->l2info & htonl(RXF_TCP)) && 1572 if ((pkt->l2info & htonl(RXF_TCP)) &&
1571 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1573 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1583 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ 1585 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
1584 skb->protocol = eth_type_trans(skb, q->netdev); 1586 skb->protocol = eth_type_trans(skb, q->netdev);
1585 skb_record_rx_queue(skb, q->idx); 1587 skb_record_rx_queue(skb, q->idx);
1588 if (skb->dev->features & NETIF_F_RXHASH)
1589 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1590
1586 pi = netdev_priv(skb->dev); 1591 pi = netdev_priv(skb->dev);
1587 rxq->stats.pkts++; 1592 rxq->stats.pkts++;
1588 1593
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2047 adap->sge.ingr_map[iq->cntxt_id] = iq; 2052 adap->sge.ingr_map[iq->cntxt_id] = iq;
2048 2053
2049 if (fl) { 2054 if (fl) {
2050 fl->cntxt_id = htons(c.fl0id); 2055 fl->cntxt_id = ntohs(c.fl0id);
2051 fl->avail = fl->pend_cred = 0; 2056 fl->avail = fl->pend_cred = 0;
2052 fl->pidx = fl->cidx = 0; 2057 fl->pidx = fl->cidx = 0;
2053 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 2058 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe123..2923dd43523d 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
53 * at the time it indicated completion is stored there. Returns 0 if the 53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise. 54 * operation completes and -EAGAIN otherwise.
55 */ 55 */
56int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp) 57 int polarity, int attempts, int delay, u32 *valp)
58{ 58{
59 while (1) { 59 while (1) {
60 u32 val = t4_read_reg(adapter, reg); 60 u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
109 * Reads registers that are accessed indirectly through an address/data 109 * Reads registers that are accessed indirectly through an address/data
110 * register pair. 110 * register pair.
111 */ 111 */
112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals, unsigned int nregs, 113 unsigned int data_reg, u32 *vals,
114 unsigned int start_idx) 114 unsigned int nregs, unsigned int start_idx)
115{ 115{
116 while (nregs--) { 116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx); 117 t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 } 120 }
121} 121}
122 122
123#if 0
123/** 124/**
124 * t4_write_indirect - write indirectly addressed registers 125 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter 126 * @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
132 * Writes a sequential block of registers that are accessed indirectly 133 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair. 134 * through an address/data register pair.
134 */ 135 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 136static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals, 137 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx) 138 unsigned int nregs, unsigned int start_idx)
138{ 139{
139 while (nregs--) { 140 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++); 141 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++); 142 t4_write_reg(adap, data_reg, *vals++);
142 } 143 }
143} 144}
145#endif
144 146
145/* 147/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
345 return 0; 347 return 0;
346} 348}
347 349
348#define VPD_ENTRY(name, len) \
349 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
350
351/* 350/*
352 * Partial EEPROM Vital Product Data structure. Includes only the ID and 351 * Partial EEPROM Vital Product Data structure. Includes only the ID and
353 * VPD-R sections. 352 * VPD-R header.
354 */ 353 */
355struct t4_vpd { 354struct t4_vpd_hdr {
356 u8 id_tag; 355 u8 id_tag;
357 u8 id_len[2]; 356 u8 id_len[2];
358 u8 id_data[ID_LEN]; 357 u8 id_data[ID_LEN];
359 u8 vpdr_tag; 358 u8 vpdr_tag;
360 u8 vpdr_len[2]; 359 u8 vpdr_len[2];
361 VPD_ENTRY(pn, 16); /* part number */
362 VPD_ENTRY(ec, EC_LEN); /* EC level */
363 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
364 VPD_ENTRY(na, 12); /* MAC address base */
365 VPD_ENTRY(port_type, 8); /* port types */
366 VPD_ENTRY(gpio, 14); /* GPIO usage */
367 VPD_ENTRY(cclk, 6); /* core clock */
368 VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
369 VPD_ENTRY(rv, 1); /* csum */
370 u32 pad; /* for multiple-of-4 sizing and alignment */
371}; 360};
372 361
373#define EEPROM_STAT_ADDR 0x7bfc 362#define EEPROM_STAT_ADDR 0x7bfc
374#define VPD_BASE 0 363#define VPD_BASE 0
364#define VPD_LEN 512
375 365
376/** 366/**
377 * t4_seeprom_wp - enable/disable EEPROM write protection 367 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
396 */ 386 */
397static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 387static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
398{ 388{
399 int ret; 389 int i, ret;
400 struct t4_vpd vpd; 390 int ec, sn, v2;
401 u8 *q = (u8 *)&vpd, csum; 391 u8 vpd[VPD_LEN], csum;
392 unsigned int vpdr_len;
393 const struct t4_vpd_hdr *v;
402 394
403 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); 395 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
404 if (ret < 0) 396 if (ret < 0)
405 return ret; 397 return ret;
406 398
407 for (csum = 0; q <= vpd.rv_data; q++) 399 v = (const struct t4_vpd_hdr *)vpd;
408 csum += *q; 400 vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
401 if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
402 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
403 return -EINVAL;
404 }
405
406#define FIND_VPD_KW(var, name) do { \
407 var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
408 vpdr_len, name); \
409 if (var < 0) { \
410 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
411 return -EINVAL; \
412 } \
413 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
414} while (0)
415
416 FIND_VPD_KW(i, "RV");
417 for (csum = 0; i >= 0; i--)
418 csum += vpd[i];
409 419
410 if (csum) { 420 if (csum) {
411 dev_err(adapter->pdev_dev, 421 dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
413 return -EINVAL; 423 return -EINVAL;
414 } 424 }
415 425
416 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); 426 FIND_VPD_KW(ec, "EC");
417 memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); 427 FIND_VPD_KW(sn, "SN");
428 FIND_VPD_KW(v2, "V2");
429#undef FIND_VPD_KW
430
431 p->cclk = simple_strtoul(vpd + v2, NULL, 10);
432 memcpy(p->id, v->id_data, ID_LEN);
418 strim(p->id); 433 strim(p->id);
419 memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); 434 memcpy(p->ec, vpd + ec, EC_LEN);
420 strim(p->ec); 435 strim(p->ec);
421 memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); 436 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
437 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
422 strim(p->sn); 438 strim(p->sn);
423 return 0; 439 return 0;
424} 440}
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
537 * (i.e., big-endian), otherwise as 32-bit words in the platform's 553 * (i.e., big-endian), otherwise as 32-bit words in the platform's
538 * natural endianess. 554 * natural endianess.
539 */ 555 */
540int t4_read_flash(struct adapter *adapter, unsigned int addr, 556static int t4_read_flash(struct adapter *adapter, unsigned int addr,
541 unsigned int nwords, u32 *data, int byte_oriented) 557 unsigned int nwords, u32 *data, int byte_oriented)
542{ 558{
543 int ret; 559 int ret;
544 560
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb117443144..7a981b81afaf 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
503}; 503};
504 504
505struct cpl_rx_pkt { 505struct cpl_rx_pkt {
506 struct rss_header rsshdr;
506 u8 opcode; 507 u8 opcode;
507#if defined(__LITTLE_ENDIAN_BITFIELD) 508#if defined(__LITTLE_ENDIAN_BITFIELD)
508 u8 iff:4; 509 u8 iff:4;
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3e8d0005540f..ef97bfcef9dd 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -168,6 +168,7 @@
168#include <linux/ethtool.h> 168#include <linux/ethtool.h>
169#include <linux/string.h> 169#include <linux/string.h>
170#include <linux/firmware.h> 170#include <linux/firmware.h>
171#include <linux/rtnetlink.h>
171#include <asm/unaligned.h> 172#include <asm/unaligned.h>
172 173
173 174
@@ -2280,8 +2281,13 @@ static void e100_tx_timeout_task(struct work_struct *work)
2280 2281
2281 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, 2282 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2282 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); 2283 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2283 e100_down(netdev_priv(netdev)); 2284
2284 e100_up(netdev_priv(netdev)); 2285 rtnl_lock();
2286 if (netif_running(netdev)) {
2287 e100_down(netdev_priv(netdev));
2288 e100_up(netdev_priv(netdev));
2289 }
2290 rtnl_unlock();
2285} 2291}
2286 2292
2287static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) 2293static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c2131851..40b62b406b08 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
81 81
82#include "e1000_hw.h" 82#include "e1000_hw.h"
83 83
84#ifdef DBG
85#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
86#else
87#define E1000_DBG(args...)
88#endif
89
90#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
91
92#define PFX "e1000: "
93
94#define DPRINTK(nlevel, klevel, fmt, args...) \
95do { \
96 if (NETIF_MSG_##nlevel & adapter->msg_enable) \
97 printk(KERN_##klevel PFX "%s: %s: " fmt, \
98 adapter->netdev->name, __func__, ##args); \
99} while (0)
100
101#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
102 85
103/* TX/RX descriptor defines */ 86/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
335 __E1000_DOWN 318 __E1000_DOWN
336}; 319};
337 320
321#undef pr_fmt
322#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
323
324extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
325#define e_dbg(format, arg...) \
326 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
327#define e_err(format, arg...) \
328 netdev_err(adapter->netdev, format, ## arg)
329#define e_info(format, arg...) \
330 netdev_info(adapter->netdev, format, ## arg)
331#define e_warn(format, arg...) \
332 netdev_warn(adapter->netdev, format, ## arg)
333#define e_notice(format, arg...) \
334 netdev_notice(adapter->netdev, format, ## arg)
335#define e_dev_info(format, arg...) \
336 dev_info(&adapter->pdev->dev, format, ## arg)
337#define e_dev_warn(format, arg...) \
338 dev_warn(&adapter->pdev->dev, format, ## arg)
339
338extern char e1000_driver_name[]; 340extern char e1000_driver_name[];
339extern const char e1000_driver_version[]; 341extern const char e1000_driver_version[];
340 342
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
352extern void e1000_power_up_phy(struct e1000_adapter *); 354extern void e1000_power_up_phy(struct e1000_adapter *);
353extern void e1000_set_ethtool_ops(struct net_device *netdev); 355extern void e1000_set_ethtool_ops(struct net_device *netdev);
354extern void e1000_check_options(struct e1000_adapter *adapter); 356extern void e1000_check_options(struct e1000_adapter *adapter);
357extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
355 358
356#endif /* _E1000_H_ */ 359#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e93117271..2a3b2dccd06d 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
346 346
347 netdev->features &= ~NETIF_F_TSO6; 347 netdev->features &= ~NETIF_F_TSO6;
348 348
349 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 349 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
350 adapter->tso_force = true; 350 adapter->tso_force = true;
351 return 0; 351 return 0;
352} 352}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
714 writel(write & test[i], address); 714 writel(write & test[i], address);
715 read = readl(address); 715 read = readl(address);
716 if (read != (write & test[i] & mask)) { 716 if (read != (write & test[i] & mask)) {
717 DPRINTK(DRV, ERR, "pattern test reg %04X failed: " 717 e_info("pattern test reg %04X failed: "
718 "got 0x%08X expected 0x%08X\n", 718 "got 0x%08X expected 0x%08X\n",
719 reg, read, (write & test[i] & mask)); 719 reg, read, (write & test[i] & mask));
720 *data = reg; 720 *data = reg;
721 return true; 721 return true;
722 } 722 }
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
734 writel(write & mask, address); 734 writel(write & mask, address);
735 read = readl(address); 735 read = readl(address);
736 if ((read & mask) != (write & mask)) { 736 if ((read & mask) != (write & mask)) {
737 DPRINTK(DRV, ERR, "set/check reg %04X test failed: " 737 e_err("set/check reg %04X test failed: "
738 "got 0x%08X expected 0x%08X\n", 738 "got 0x%08X expected 0x%08X\n",
739 reg, (read & mask), (write & mask)); 739 reg, (read & mask), (write & mask));
740 *data = reg; 740 *data = reg;
741 return true; 741 return true;
742 } 742 }
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
779 ew32(STATUS, toggle); 779 ew32(STATUS, toggle);
780 after = er32(STATUS) & toggle; 780 after = er32(STATUS) & toggle;
781 if (value != after) { 781 if (value != after) {
782 DPRINTK(DRV, ERR, "failed STATUS register test got: " 782 e_err("failed STATUS register test got: "
783 "0x%08X expected: 0x%08X\n", after, value); 783 "0x%08X expected: 0x%08X\n", after, value);
784 *data = 1; 784 *data = 1;
785 return 1; 785 return 1;
786 } 786 }
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
894 *data = 1; 894 *data = 1;
895 return -1; 895 return -1;
896 } 896 }
897 DPRINTK(HW, INFO, "testing %s interrupt\n", 897 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
898 (shared_int ? "shared" : "unshared"));
899 898
900 /* Disable all the interrupts */ 899 /* Disable all the interrupts */
901 ew32(IMC, 0xFFFFFFFF); 900 ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
980 if (txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
981 for (i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
982 if (txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
983 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 dma_unmap_single(&pdev->dev,
983 txdr->buffer_info[i].dma,
984 txdr->buffer_info[i].length, 984 txdr->buffer_info[i].length,
985 PCI_DMA_TODEVICE); 985 DMA_TO_DEVICE);
986 if (txdr->buffer_info[i].skb) 986 if (txdr->buffer_info[i].skb)
987 dev_kfree_skb(txdr->buffer_info[i].skb); 987 dev_kfree_skb(txdr->buffer_info[i].skb);
988 } 988 }
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
991 if (rxdr->desc && rxdr->buffer_info) { 991 if (rxdr->desc && rxdr->buffer_info) {
992 for (i = 0; i < rxdr->count; i++) { 992 for (i = 0; i < rxdr->count; i++) {
993 if (rxdr->buffer_info[i].dma) 993 if (rxdr->buffer_info[i].dma)
994 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 994 dma_unmap_single(&pdev->dev,
995 rxdr->buffer_info[i].dma,
995 rxdr->buffer_info[i].length, 996 rxdr->buffer_info[i].length,
996 PCI_DMA_FROMDEVICE); 997 DMA_FROM_DEVICE);
997 if (rxdr->buffer_info[i].skb) 998 if (rxdr->buffer_info[i].skb)
998 dev_kfree_skb(rxdr->buffer_info[i].skb); 999 dev_kfree_skb(rxdr->buffer_info[i].skb);
999 } 1000 }
1000 } 1001 }
1001 1002
1002 if (txdr->desc) { 1003 if (txdr->desc) {
1003 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); 1004 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1005 txdr->dma);
1004 txdr->desc = NULL; 1006 txdr->desc = NULL;
1005 } 1007 }
1006 if (rxdr->desc) { 1008 if (rxdr->desc) {
1007 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 1009 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1010 rxdr->dma);
1008 rxdr->desc = NULL; 1011 rxdr->desc = NULL;
1009 } 1012 }
1010 1013
@@ -1039,7 +1042,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 1042
1040 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1043 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1041 txdr->size = ALIGN(txdr->size, 4096); 1044 txdr->size = ALIGN(txdr->size, 4096);
1042 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1045 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1046 GFP_KERNEL);
1043 if (!txdr->desc) { 1047 if (!txdr->desc) {
1044 ret_val = 2; 1048 ret_val = 2;
1045 goto err_nomem; 1049 goto err_nomem;
@@ -1070,8 +1074,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1070 txdr->buffer_info[i].skb = skb; 1074 txdr->buffer_info[i].skb = skb;
1071 txdr->buffer_info[i].length = skb->len; 1075 txdr->buffer_info[i].length = skb->len;
1072 txdr->buffer_info[i].dma = 1076 txdr->buffer_info[i].dma =
1073 pci_map_single(pdev, skb->data, skb->len, 1077 dma_map_single(&pdev->dev, skb->data, skb->len,
1074 PCI_DMA_TODEVICE); 1078 DMA_TO_DEVICE);
1075 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); 1079 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
1076 tx_desc->lower.data = cpu_to_le32(skb->len); 1080 tx_desc->lower.data = cpu_to_le32(skb->len);
1077 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1081 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1097,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1093 } 1097 }
1094 1098
1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1099 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1096 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1100 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1101 GFP_KERNEL);
1097 if (!rxdr->desc) { 1102 if (!rxdr->desc) {
1098 ret_val = 5; 1103 ret_val = 5;
1099 goto err_nomem; 1104 goto err_nomem;
@@ -1126,8 +1131,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1126 rxdr->buffer_info[i].skb = skb; 1131 rxdr->buffer_info[i].skb = skb;
1127 rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; 1132 rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
1128 rxdr->buffer_info[i].dma = 1133 rxdr->buffer_info[i].dma =
1129 pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048, 1134 dma_map_single(&pdev->dev, skb->data,
1130 PCI_DMA_FROMDEVICE); 1135 E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
1131 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); 1136 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
1132 memset(skb->data, 0x00, skb->len); 1137 memset(skb->data, 0x00, skb->len);
1133 } 1138 }
@@ -1444,10 +1449,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1444 for (i = 0; i < 64; i++) { /* send the packets */ 1449 for (i = 0; i < 64; i++) { /* send the packets */
1445 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1450 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1446 1024); 1451 1024);
1447 pci_dma_sync_single_for_device(pdev, 1452 dma_sync_single_for_device(&pdev->dev,
1448 txdr->buffer_info[k].dma, 1453 txdr->buffer_info[k].dma,
1449 txdr->buffer_info[k].length, 1454 txdr->buffer_info[k].length,
1450 PCI_DMA_TODEVICE); 1455 DMA_TO_DEVICE);
1451 if (unlikely(++k == txdr->count)) k = 0; 1456 if (unlikely(++k == txdr->count)) k = 0;
1452 } 1457 }
1453 ew32(TDT, k); 1458 ew32(TDT, k);
@@ -1455,10 +1460,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1455 time = jiffies; /* set the start time for the receive */ 1460 time = jiffies; /* set the start time for the receive */
1456 good_cnt = 0; 1461 good_cnt = 0;
1457 do { /* receive the sent packets */ 1462 do { /* receive the sent packets */
1458 pci_dma_sync_single_for_cpu(pdev, 1463 dma_sync_single_for_cpu(&pdev->dev,
1459 rxdr->buffer_info[l].dma, 1464 rxdr->buffer_info[l].dma,
1460 rxdr->buffer_info[l].length, 1465 rxdr->buffer_info[l].length,
1461 PCI_DMA_FROMDEVICE); 1466 DMA_FROM_DEVICE);
1462 1467
1463 ret_val = e1000_check_lbtest_frame( 1468 ret_val = e1000_check_lbtest_frame(
1464 rxdr->buffer_info[l].skb, 1469 rxdr->buffer_info[l].skb,
@@ -1558,7 +1563,7 @@ static void e1000_diag_test(struct net_device *netdev,
1558 u8 forced_speed_duplex = hw->forced_speed_duplex; 1563 u8 forced_speed_duplex = hw->forced_speed_duplex;
1559 u8 autoneg = hw->autoneg; 1564 u8 autoneg = hw->autoneg;
1560 1565
1561 DPRINTK(HW, INFO, "offline testing starting\n"); 1566 e_info("offline testing starting\n");
1562 1567
1563 /* Link test performed before hardware reset so autoneg doesn't 1568 /* Link test performed before hardware reset so autoneg doesn't
1564 * interfere with test result */ 1569 * interfere with test result */
@@ -1598,7 +1603,7 @@ static void e1000_diag_test(struct net_device *netdev,
1598 if (if_running) 1603 if (if_running)
1599 dev_open(netdev); 1604 dev_open(netdev);
1600 } else { 1605 } else {
1601 DPRINTK(HW, INFO, "online testing starting\n"); 1606 e_info("online testing starting\n");
1602 /* Online tests */ 1607 /* Online tests */
1603 if (e1000_link_test(adapter, &data[4])) 1608 if (e1000_link_test(adapter, &data[4]))
1604 eth_test->flags |= ETH_TEST_FL_FAILED; 1609 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1696,7 @@ static void e1000_get_wol(struct net_device *netdev,
1691 wol->supported &= ~WAKE_UCAST; 1696 wol->supported &= ~WAKE_UCAST;
1692 1697
1693 if (adapter->wol & E1000_WUFC_EX) 1698 if (adapter->wol & E1000_WUFC_EX)
1694 DPRINTK(DRV, ERR, "Interface does not support " 1699 e_err("Interface does not support "
1695 "directed (unicast) frame wake-up packets\n"); 1700 "directed (unicast) frame wake-up packets\n");
1696 break; 1701 break;
1697 default: 1702 default:
@@ -1725,8 +1730,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1725 switch (hw->device_id) { 1730 switch (hw->device_id) {
1726 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1731 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1727 if (wol->wolopts & WAKE_UCAST) { 1732 if (wol->wolopts & WAKE_UCAST) {
1728 DPRINTK(DRV, ERR, "Interface does not support " 1733 e_err("Interface does not support "
1729 "directed (unicast) frame wake-up packets\n"); 1734 "directed (unicast) frame wake-up packets\n");
1730 return -EOPNOTSUPP; 1735 return -EOPNOTSUPP;
1731 } 1736 }
1732 break; 1737 break;
@@ -1803,7 +1808,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
1803 if (adapter->hw.mac_type < e1000_82545) 1808 if (adapter->hw.mac_type < e1000_82545)
1804 return -EOPNOTSUPP; 1809 return -EOPNOTSUPP;
1805 1810
1806 if (adapter->itr_setting <= 3) 1811 if (adapter->itr_setting <= 4)
1807 ec->rx_coalesce_usecs = adapter->itr_setting; 1812 ec->rx_coalesce_usecs = adapter->itr_setting;
1808 else 1813 else
1809 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; 1814 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1826,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
1821 return -EOPNOTSUPP; 1826 return -EOPNOTSUPP;
1822 1827
1823 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || 1828 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1824 ((ec->rx_coalesce_usecs > 3) && 1829 ((ec->rx_coalesce_usecs > 4) &&
1825 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || 1830 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1826 (ec->rx_coalesce_usecs == 2)) 1831 (ec->rx_coalesce_usecs == 2))
1827 return -EINVAL; 1832 return -EINVAL;
1828 1833
1829 if (ec->rx_coalesce_usecs <= 3) { 1834 if (ec->rx_coalesce_usecs == 4) {
1835 adapter->itr = adapter->itr_setting = 4;
1836 } else if (ec->rx_coalesce_usecs <= 3) {
1830 adapter->itr = 20000; 1837 adapter->itr = 20000;
1831 adapter->itr_setting = ec->rx_coalesce_usecs; 1838 adapter->itr_setting = ec->rx_coalesce_usecs;
1832 } else { 1839 } else {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index e2b6e6e7ba6a..c7e242b69a18 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
30 * Shared functions for accessing and configuring the MAC 30 * Shared functions for accessing and configuring the MAC
31 */ 31 */
32 32
33#include "e1000_hw.h" 33#include "e1000.h"
34 34
35static s32 e1000_check_downshift(struct e1000_hw *hw); 35static s32 e1000_check_downshift(struct e1000_hw *hw);
36static s32 e1000_check_polarity(struct e1000_hw *hw, 36static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
114 */ 114 */
115static s32 e1000_set_phy_type(struct e1000_hw *hw) 115static s32 e1000_set_phy_type(struct e1000_hw *hw)
116{ 116{
117 DEBUGFUNC("e1000_set_phy_type"); 117 e_dbg("e1000_set_phy_type");
118 118
119 if (hw->mac_type == e1000_undefined) 119 if (hw->mac_type == e1000_undefined)
120 return -E1000_ERR_PHY_TYPE; 120 return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
152 u32 ret_val; 152 u32 ret_val;
153 u16 phy_saved_data; 153 u16 phy_saved_data;
154 154
155 DEBUGFUNC("e1000_phy_init_script"); 155 e_dbg("e1000_phy_init_script");
156 156
157 if (hw->phy_init_script) { 157 if (hw->phy_init_script) {
158 msleep(20); 158 msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
245 */ 245 */
246s32 e1000_set_mac_type(struct e1000_hw *hw) 246s32 e1000_set_mac_type(struct e1000_hw *hw)
247{ 247{
248 DEBUGFUNC("e1000_set_mac_type"); 248 e_dbg("e1000_set_mac_type");
249 249
250 switch (hw->device_id) { 250 switch (hw->device_id) {
251 case E1000_DEV_ID_82542: 251 case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
354{ 354{
355 u32 status; 355 u32 status;
356 356
357 DEBUGFUNC("e1000_set_media_type"); 357 e_dbg("e1000_set_media_type");
358 358
359 if (hw->mac_type != e1000_82543) { 359 if (hw->mac_type != e1000_82543) {
360 /* tbi_compatibility is only valid on 82543 */ 360 /* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
401 u32 led_ctrl; 401 u32 led_ctrl;
402 s32 ret_val; 402 s32 ret_val;
403 403
404 DEBUGFUNC("e1000_reset_hw"); 404 e_dbg("e1000_reset_hw");
405 405
406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
407 if (hw->mac_type == e1000_82542_rev2_0) { 407 if (hw->mac_type == e1000_82542_rev2_0) {
408 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 408 e_dbg("Disabling MWI on 82542 rev 2.0\n");
409 e1000_pci_clear_mwi(hw); 409 e1000_pci_clear_mwi(hw);
410 } 410 }
411 411
412 /* Clear interrupt mask to stop board from generating interrupts */ 412 /* Clear interrupt mask to stop board from generating interrupts */
413 DEBUGOUT("Masking off all interrupts\n"); 413 e_dbg("Masking off all interrupts\n");
414 ew32(IMC, 0xffffffff); 414 ew32(IMC, 0xffffffff);
415 415
416 /* Disable the Transmit and Receive units. Then delay to allow 416 /* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
442 * the current PCI configuration. The global reset bit is self- 442 * the current PCI configuration. The global reset bit is self-
443 * clearing, and should clear within a microsecond. 443 * clearing, and should clear within a microsecond.
444 */ 444 */
445 DEBUGOUT("Issuing a global reset to MAC\n"); 445 e_dbg("Issuing a global reset to MAC\n");
446 446
447 switch (hw->mac_type) { 447 switch (hw->mac_type) {
448 case e1000_82544: 448 case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
516 } 516 }
517 517
518 /* Clear interrupt mask to stop board from generating interrupts */ 518 /* Clear interrupt mask to stop board from generating interrupts */
519 DEBUGOUT("Masking off all interrupts\n"); 519 e_dbg("Masking off all interrupts\n");
520 ew32(IMC, 0xffffffff); 520 ew32(IMC, 0xffffffff);
521 521
522 /* Clear any pending interrupt events. */ 522 /* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
549 u32 mta_size; 549 u32 mta_size;
550 u32 ctrl_ext; 550 u32 ctrl_ext;
551 551
552 DEBUGFUNC("e1000_init_hw"); 552 e_dbg("e1000_init_hw");
553 553
554 /* Initialize Identification LED */ 554 /* Initialize Identification LED */
555 ret_val = e1000_id_led_init(hw); 555 ret_val = e1000_id_led_init(hw);
556 if (ret_val) { 556 if (ret_val) {
557 DEBUGOUT("Error Initializing Identification LED\n"); 557 e_dbg("Error Initializing Identification LED\n");
558 return ret_val; 558 return ret_val;
559 } 559 }
560 560
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
562 e1000_set_media_type(hw); 562 e1000_set_media_type(hw);
563 563
564 /* Disabling VLAN filtering. */ 564 /* Disabling VLAN filtering. */
565 DEBUGOUT("Initializing the IEEE VLAN\n"); 565 e_dbg("Initializing the IEEE VLAN\n");
566 if (hw->mac_type < e1000_82545_rev_3) 566 if (hw->mac_type < e1000_82545_rev_3)
567 ew32(VET, 0); 567 ew32(VET, 0);
568 e1000_clear_vfta(hw); 568 e1000_clear_vfta(hw);
569 569
570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
571 if (hw->mac_type == e1000_82542_rev2_0) { 571 if (hw->mac_type == e1000_82542_rev2_0) {
572 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 572 e_dbg("Disabling MWI on 82542 rev 2.0\n");
573 e1000_pci_clear_mwi(hw); 573 e1000_pci_clear_mwi(hw);
574 ew32(RCTL, E1000_RCTL_RST); 574 ew32(RCTL, E1000_RCTL_RST);
575 E1000_WRITE_FLUSH(); 575 E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
591 } 591 }
592 592
593 /* Zero out the Multicast HASH table */ 593 /* Zero out the Multicast HASH table */
594 DEBUGOUT("Zeroing the MTA\n"); 594 e_dbg("Zeroing the MTA\n");
595 mta_size = E1000_MC_TBL_SIZE; 595 mta_size = E1000_MC_TBL_SIZE;
596 for (i = 0; i < mta_size; i++) { 596 for (i = 0; i < mta_size; i++) {
597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
662 u16 eeprom_data; 662 u16 eeprom_data;
663 s32 ret_val; 663 s32 ret_val;
664 664
665 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 665 e_dbg("e1000_adjust_serdes_amplitude");
666 666
667 if (hw->media_type != e1000_media_type_internal_serdes) 667 if (hw->media_type != e1000_media_type_internal_serdes)
668 return E1000_SUCCESS; 668 return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
709 s32 ret_val; 709 s32 ret_val;
710 u16 eeprom_data; 710 u16 eeprom_data;
711 711
712 DEBUGFUNC("e1000_setup_link"); 712 e_dbg("e1000_setup_link");
713 713
714 /* Read and store word 0x0F of the EEPROM. This word contains bits 714 /* Read and store word 0x0F of the EEPROM. This word contains bits
715 * that determine the hardware's default PAUSE (flow control) mode, 715 * that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
724 1, &eeprom_data); 724 1, &eeprom_data);
725 if (ret_val) { 725 if (ret_val) {
726 DEBUGOUT("EEPROM Read Error\n"); 726 e_dbg("EEPROM Read Error\n");
727 return -E1000_ERR_EEPROM; 727 return -E1000_ERR_EEPROM;
728 } 728 }
729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) 729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
747 747
748 hw->original_fc = hw->fc; 748 hw->original_fc = hw->fc;
749 749
750 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc); 750 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
751 751
752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial 752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial
753 * polarity value for the SW controlled pins, and setup the 753 * polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
761 1, &eeprom_data); 761 1, &eeprom_data);
762 if (ret_val) { 762 if (ret_val) {
763 DEBUGOUT("EEPROM Read Error\n"); 763 e_dbg("EEPROM Read Error\n");
764 return -E1000_ERR_EEPROM; 764 return -E1000_ERR_EEPROM;
765 } 765 }
766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << 766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
777 * control is disabled, because it does not hurt anything to 777 * control is disabled, because it does not hurt anything to
778 * initialize these registers. 778 * initialize these registers.
779 */ 779 */
780 DEBUGOUT 780 e_dbg("Initializing the Flow Control address, type and timer regs\n");
781 ("Initializing the Flow Control address, type and timer regs\n");
782 781
783 ew32(FCT, FLOW_CONTROL_TYPE); 782 ew32(FCT, FLOW_CONTROL_TYPE);
784 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 783 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
827 u32 signal = 0; 826 u32 signal = 0;
828 s32 ret_val; 827 s32 ret_val;
829 828
830 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 829 e_dbg("e1000_setup_fiber_serdes_link");
831 830
832 /* On adapters with a MAC newer than 82544, SWDP 1 will be 831 /* On adapters with a MAC newer than 82544, SWDP 1 will be
833 * set when the optics detect a signal. On older adapters, it will be 832 * set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
893 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 892 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
894 break; 893 break;
895 default: 894 default:
896 DEBUGOUT("Flow control param set incorrectly\n"); 895 e_dbg("Flow control param set incorrectly\n");
897 return -E1000_ERR_CONFIG; 896 return -E1000_ERR_CONFIG;
898 break; 897 break;
899 } 898 }
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
904 * link-up status bit will be set and the flow control enable bits (RFCE 903 * link-up status bit will be set and the flow control enable bits (RFCE
905 * and TFCE) will be set according to their negotiated value. 904 * and TFCE) will be set according to their negotiated value.
906 */ 905 */
907 DEBUGOUT("Auto-negotiation enabled\n"); 906 e_dbg("Auto-negotiation enabled\n");
908 907
909 ew32(TXCW, txcw); 908 ew32(TXCW, txcw);
910 ew32(CTRL, ctrl); 909 ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
921 */ 920 */
922 if (hw->media_type == e1000_media_type_internal_serdes || 921 if (hw->media_type == e1000_media_type_internal_serdes ||
923 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { 922 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
924 DEBUGOUT("Looking for Link\n"); 923 e_dbg("Looking for Link\n");
925 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { 924 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
926 msleep(10); 925 msleep(10);
927 status = er32(STATUS); 926 status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
929 break; 928 break;
930 } 929 }
931 if (i == (LINK_UP_TIMEOUT / 10)) { 930 if (i == (LINK_UP_TIMEOUT / 10)) {
932 DEBUGOUT("Never got a valid link from auto-neg!!!\n"); 931 e_dbg("Never got a valid link from auto-neg!!!\n");
933 hw->autoneg_failed = 1; 932 hw->autoneg_failed = 1;
934 /* AutoNeg failed to achieve a link, so we'll call 933 /* AutoNeg failed to achieve a link, so we'll call
935 * e1000_check_for_link. This routine will force the link up if 934 * e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
938 */ 937 */
939 ret_val = e1000_check_for_link(hw); 938 ret_val = e1000_check_for_link(hw);
940 if (ret_val) { 939 if (ret_val) {
941 DEBUGOUT("Error while checking for link\n"); 940 e_dbg("Error while checking for link\n");
942 return ret_val; 941 return ret_val;
943 } 942 }
944 hw->autoneg_failed = 0; 943 hw->autoneg_failed = 0;
945 } else { 944 } else {
946 hw->autoneg_failed = 0; 945 hw->autoneg_failed = 0;
947 DEBUGOUT("Valid Link Found\n"); 946 e_dbg("Valid Link Found\n");
948 } 947 }
949 } else { 948 } else {
950 DEBUGOUT("No Signal Detected\n"); 949 e_dbg("No Signal Detected\n");
951 } 950 }
952 return E1000_SUCCESS; 951 return E1000_SUCCESS;
953} 952}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
964 s32 ret_val; 963 s32 ret_val;
965 u16 phy_data; 964 u16 phy_data;
966 965
967 DEBUGFUNC("e1000_copper_link_preconfig"); 966 e_dbg("e1000_copper_link_preconfig");
968 967
969 ctrl = er32(CTRL); 968 ctrl = er32(CTRL);
970 /* With 82543, we need to force speed and duplex on the MAC equal to what 969 /* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
987 /* Make sure we have a valid PHY */ 986 /* Make sure we have a valid PHY */
988 ret_val = e1000_detect_gig_phy(hw); 987 ret_val = e1000_detect_gig_phy(hw);
989 if (ret_val) { 988 if (ret_val) {
990 DEBUGOUT("Error, did not detect valid phy.\n"); 989 e_dbg("Error, did not detect valid phy.\n");
991 return ret_val; 990 return ret_val;
992 } 991 }
993 DEBUGOUT1("Phy ID = %x\n", hw->phy_id); 992 e_dbg("Phy ID = %x\n", hw->phy_id);
994 993
995 /* Set PHY to class A mode (if necessary) */ 994 /* Set PHY to class A mode (if necessary) */
996 ret_val = e1000_set_phy_mode(hw); 995 ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1025 s32 ret_val; 1024 s32 ret_val;
1026 u16 phy_data; 1025 u16 phy_data;
1027 1026
1028 DEBUGFUNC("e1000_copper_link_igp_setup"); 1027 e_dbg("e1000_copper_link_igp_setup");
1029 1028
1030 if (hw->phy_reset_disable) 1029 if (hw->phy_reset_disable)
1031 return E1000_SUCCESS; 1030 return E1000_SUCCESS;
1032 1031
1033 ret_val = e1000_phy_reset(hw); 1032 ret_val = e1000_phy_reset(hw);
1034 if (ret_val) { 1033 if (ret_val) {
1035 DEBUGOUT("Error Resetting the PHY\n"); 1034 e_dbg("Error Resetting the PHY\n");
1036 return ret_val; 1035 return ret_val;
1037 } 1036 }
1038 1037
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1049 /* disable lplu d3 during driver init */ 1048 /* disable lplu d3 during driver init */
1050 ret_val = e1000_set_d3_lplu_state(hw, false); 1049 ret_val = e1000_set_d3_lplu_state(hw, false);
1051 if (ret_val) { 1050 if (ret_val) {
1052 DEBUGOUT("Error Disabling LPLU D3\n"); 1051 e_dbg("Error Disabling LPLU D3\n");
1053 return ret_val; 1052 return ret_val;
1054 } 1053 }
1055 } 1054 }
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1166 s32 ret_val; 1165 s32 ret_val;
1167 u16 phy_data; 1166 u16 phy_data;
1168 1167
1169 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1168 e_dbg("e1000_copper_link_mgp_setup");
1170 1169
1171 if (hw->phy_reset_disable) 1170 if (hw->phy_reset_disable)
1172 return E1000_SUCCESS; 1171 return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1255 /* SW Reset the PHY so all changes take effect */ 1254 /* SW Reset the PHY so all changes take effect */
1256 ret_val = e1000_phy_reset(hw); 1255 ret_val = e1000_phy_reset(hw);
1257 if (ret_val) { 1256 if (ret_val) {
1258 DEBUGOUT("Error Resetting the PHY\n"); 1257 e_dbg("Error Resetting the PHY\n");
1259 return ret_val; 1258 return ret_val;
1260 } 1259 }
1261 1260
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1274 s32 ret_val; 1273 s32 ret_val;
1275 u16 phy_data; 1274 u16 phy_data;
1276 1275
1277 DEBUGFUNC("e1000_copper_link_autoneg"); 1276 e_dbg("e1000_copper_link_autoneg");
1278 1277
1279 /* Perform some bounds checking on the hw->autoneg_advertised 1278 /* Perform some bounds checking on the hw->autoneg_advertised
1280 * parameter. If this variable is zero, then set it to the default. 1279 * parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1287 if (hw->autoneg_advertised == 0) 1286 if (hw->autoneg_advertised == 0)
1288 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1287 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1289 1288
1290 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1289 e_dbg("Reconfiguring auto-neg advertisement params\n");
1291 ret_val = e1000_phy_setup_autoneg(hw); 1290 ret_val = e1000_phy_setup_autoneg(hw);
1292 if (ret_val) { 1291 if (ret_val) {
1293 DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1292 e_dbg("Error Setting up Auto-Negotiation\n");
1294 return ret_val; 1293 return ret_val;
1295 } 1294 }
1296 DEBUGOUT("Restarting Auto-Neg\n"); 1295 e_dbg("Restarting Auto-Neg\n");
1297 1296
1298 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1297 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1299 * the Auto Neg Restart bit in the PHY control register. 1298 * the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1313 if (hw->wait_autoneg_complete) { 1312 if (hw->wait_autoneg_complete) {
1314 ret_val = e1000_wait_autoneg(hw); 1313 ret_val = e1000_wait_autoneg(hw);
1315 if (ret_val) { 1314 if (ret_val) {
1316 DEBUGOUT 1315 e_dbg
1317 ("Error while waiting for autoneg to complete\n"); 1316 ("Error while waiting for autoneg to complete\n");
1318 return ret_val; 1317 return ret_val;
1319 } 1318 }
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1340static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1339static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1341{ 1340{
1342 s32 ret_val; 1341 s32 ret_val;
1343 DEBUGFUNC("e1000_copper_link_postconfig"); 1342 e_dbg("e1000_copper_link_postconfig");
1344 1343
1345 if (hw->mac_type >= e1000_82544) { 1344 if (hw->mac_type >= e1000_82544) {
1346 e1000_config_collision_dist(hw); 1345 e1000_config_collision_dist(hw);
1347 } else { 1346 } else {
1348 ret_val = e1000_config_mac_to_phy(hw); 1347 ret_val = e1000_config_mac_to_phy(hw);
1349 if (ret_val) { 1348 if (ret_val) {
1350 DEBUGOUT("Error configuring MAC to PHY settings\n"); 1349 e_dbg("Error configuring MAC to PHY settings\n");
1351 return ret_val; 1350 return ret_val;
1352 } 1351 }
1353 } 1352 }
1354 ret_val = e1000_config_fc_after_link_up(hw); 1353 ret_val = e1000_config_fc_after_link_up(hw);
1355 if (ret_val) { 1354 if (ret_val) {
1356 DEBUGOUT("Error Configuring Flow Control\n"); 1355 e_dbg("Error Configuring Flow Control\n");
1357 return ret_val; 1356 return ret_val;
1358 } 1357 }
1359 1358
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1361 if (hw->phy_type == e1000_phy_igp) { 1360 if (hw->phy_type == e1000_phy_igp) {
1362 ret_val = e1000_config_dsp_after_link_change(hw, true); 1361 ret_val = e1000_config_dsp_after_link_change(hw, true);
1363 if (ret_val) { 1362 if (ret_val) {
1364 DEBUGOUT("Error Configuring DSP after link up\n"); 1363 e_dbg("Error Configuring DSP after link up\n");
1365 return ret_val; 1364 return ret_val;
1366 } 1365 }
1367 } 1366 }
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1381 u16 i; 1380 u16 i;
1382 u16 phy_data; 1381 u16 phy_data;
1383 1382
1384 DEBUGFUNC("e1000_setup_copper_link"); 1383 e_dbg("e1000_setup_copper_link");
1385 1384
1386 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1385 /* Check if it is a valid PHY and set PHY mode if necessary. */
1387 ret_val = e1000_copper_link_preconfig(hw); 1386 ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1407 } else { 1406 } else {
1408 /* PHY will be set to 10H, 10F, 100H,or 100F 1407 /* PHY will be set to 10H, 10F, 100H,or 100F
1409 * depending on value from forced_speed_duplex. */ 1408 * depending on value from forced_speed_duplex. */
1410 DEBUGOUT("Forcing speed and duplex\n"); 1409 e_dbg("Forcing speed and duplex\n");
1411 ret_val = e1000_phy_force_speed_duplex(hw); 1410 ret_val = e1000_phy_force_speed_duplex(hw);
1412 if (ret_val) { 1411 if (ret_val) {
1413 DEBUGOUT("Error Forcing Speed and Duplex\n"); 1412 e_dbg("Error Forcing Speed and Duplex\n");
1414 return ret_val; 1413 return ret_val;
1415 } 1414 }
1416 } 1415 }
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1432 if (ret_val) 1431 if (ret_val)
1433 return ret_val; 1432 return ret_val;
1434 1433
1435 DEBUGOUT("Valid link established!!!\n"); 1434 e_dbg("Valid link established!!!\n");
1436 return E1000_SUCCESS; 1435 return E1000_SUCCESS;
1437 } 1436 }
1438 udelay(10); 1437 udelay(10);
1439 } 1438 }
1440 1439
1441 DEBUGOUT("Unable to establish link!!!\n"); 1440 e_dbg("Unable to establish link!!!\n");
1442 return E1000_SUCCESS; 1441 return E1000_SUCCESS;
1443} 1442}
1444 1443
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1454 u16 mii_autoneg_adv_reg; 1453 u16 mii_autoneg_adv_reg;
1455 u16 mii_1000t_ctrl_reg; 1454 u16 mii_1000t_ctrl_reg;
1456 1455
1457 DEBUGFUNC("e1000_phy_setup_autoneg"); 1456 e_dbg("e1000_phy_setup_autoneg");
1458 1457
1459 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 1458 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
1460 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); 1459 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1481 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; 1480 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
1482 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; 1481 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
1483 1482
1484 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised); 1483 e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
1485 1484
1486 /* Do we want to advertise 10 Mb Half Duplex? */ 1485 /* Do we want to advertise 10 Mb Half Duplex? */
1487 if (hw->autoneg_advertised & ADVERTISE_10_HALF) { 1486 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
1488 DEBUGOUT("Advertise 10mb Half duplex\n"); 1487 e_dbg("Advertise 10mb Half duplex\n");
1489 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; 1488 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
1490 } 1489 }
1491 1490
1492 /* Do we want to advertise 10 Mb Full Duplex? */ 1491 /* Do we want to advertise 10 Mb Full Duplex? */
1493 if (hw->autoneg_advertised & ADVERTISE_10_FULL) { 1492 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
1494 DEBUGOUT("Advertise 10mb Full duplex\n"); 1493 e_dbg("Advertise 10mb Full duplex\n");
1495 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; 1494 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
1496 } 1495 }
1497 1496
1498 /* Do we want to advertise 100 Mb Half Duplex? */ 1497 /* Do we want to advertise 100 Mb Half Duplex? */
1499 if (hw->autoneg_advertised & ADVERTISE_100_HALF) { 1498 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
1500 DEBUGOUT("Advertise 100mb Half duplex\n"); 1499 e_dbg("Advertise 100mb Half duplex\n");
1501 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; 1500 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
1502 } 1501 }
1503 1502
1504 /* Do we want to advertise 100 Mb Full Duplex? */ 1503 /* Do we want to advertise 100 Mb Full Duplex? */
1505 if (hw->autoneg_advertised & ADVERTISE_100_FULL) { 1504 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
1506 DEBUGOUT("Advertise 100mb Full duplex\n"); 1505 e_dbg("Advertise 100mb Full duplex\n");
1507 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; 1506 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
1508 } 1507 }
1509 1508
1510 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ 1509 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
1511 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { 1510 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
1512 DEBUGOUT 1511 e_dbg
1513 ("Advertise 1000mb Half duplex requested, request denied!\n"); 1512 ("Advertise 1000mb Half duplex requested, request denied!\n");
1514 } 1513 }
1515 1514
1516 /* Do we want to advertise 1000 Mb Full Duplex? */ 1515 /* Do we want to advertise 1000 Mb Full Duplex? */
1517 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { 1516 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
1518 DEBUGOUT("Advertise 1000mb Full duplex\n"); 1517 e_dbg("Advertise 1000mb Full duplex\n");
1519 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 1518 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
1520 } 1519 }
1521 1520
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1568 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1567 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1569 break; 1568 break;
1570 default: 1569 default:
1571 DEBUGOUT("Flow control param set incorrectly\n"); 1570 e_dbg("Flow control param set incorrectly\n");
1572 return -E1000_ERR_CONFIG; 1571 return -E1000_ERR_CONFIG;
1573 } 1572 }
1574 1573
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1576 if (ret_val) 1575 if (ret_val)
1577 return ret_val; 1576 return ret_val;
1578 1577
1579 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1578 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1580 1579
1581 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1580 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
1582 if (ret_val) 1581 if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1600 u16 phy_data; 1599 u16 phy_data;
1601 u16 i; 1600 u16 i;
1602 1601
1603 DEBUGFUNC("e1000_phy_force_speed_duplex"); 1602 e_dbg("e1000_phy_force_speed_duplex");
1604 1603
1605 /* Turn off Flow control if we are forcing speed and duplex. */ 1604 /* Turn off Flow control if we are forcing speed and duplex. */
1606 hw->fc = E1000_FC_NONE; 1605 hw->fc = E1000_FC_NONE;
1607 1606
1608 DEBUGOUT1("hw->fc = %d\n", hw->fc); 1607 e_dbg("hw->fc = %d\n", hw->fc);
1609 1608
1610 /* Read the Device Control Register. */ 1609 /* Read the Device Control Register. */
1611 ctrl = er32(CTRL); 1610 ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1634 */ 1633 */
1635 ctrl |= E1000_CTRL_FD; 1634 ctrl |= E1000_CTRL_FD;
1636 mii_ctrl_reg |= MII_CR_FULL_DUPLEX; 1635 mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
1637 DEBUGOUT("Full Duplex\n"); 1636 e_dbg("Full Duplex\n");
1638 } else { 1637 } else {
1639 /* We want to force half duplex so we CLEAR the full duplex bits in 1638 /* We want to force half duplex so we CLEAR the full duplex bits in
1640 * the Device and MII Control Registers. 1639 * the Device and MII Control Registers.
1641 */ 1640 */
1642 ctrl &= ~E1000_CTRL_FD; 1641 ctrl &= ~E1000_CTRL_FD;
1643 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; 1642 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
1644 DEBUGOUT("Half Duplex\n"); 1643 e_dbg("Half Duplex\n");
1645 } 1644 }
1646 1645
1647 /* Are we forcing 100Mbps??? */ 1646 /* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1651 ctrl |= E1000_CTRL_SPD_100; 1650 ctrl |= E1000_CTRL_SPD_100;
1652 mii_ctrl_reg |= MII_CR_SPEED_100; 1651 mii_ctrl_reg |= MII_CR_SPEED_100;
1653 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1652 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1654 DEBUGOUT("Forcing 100mb "); 1653 e_dbg("Forcing 100mb ");
1655 } else { 1654 } else {
1656 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ 1655 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
1657 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1656 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1658 mii_ctrl_reg |= MII_CR_SPEED_10; 1657 mii_ctrl_reg |= MII_CR_SPEED_10;
1659 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1658 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1660 DEBUGOUT("Forcing 10mb "); 1659 e_dbg("Forcing 10mb ");
1661 } 1660 }
1662 1661
1663 e1000_config_collision_dist(hw); 1662 e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1680 if (ret_val) 1679 if (ret_val)
1681 return ret_val; 1680 return ret_val;
1682 1681
1683 DEBUGOUT1("M88E1000 PSCR: %x\n", phy_data); 1682 e_dbg("M88E1000 PSCR: %x\n", phy_data);
1684 1683
1685 /* Need to reset the PHY or these changes will be ignored */ 1684 /* Need to reset the PHY or these changes will be ignored */
1686 mii_ctrl_reg |= MII_CR_RESET; 1685 mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1720 */ 1719 */
1721 if (hw->wait_autoneg_complete) { 1720 if (hw->wait_autoneg_complete) {
1722 /* We will wait for autoneg to complete. */ 1721 /* We will wait for autoneg to complete. */
1723 DEBUGOUT("Waiting for forced speed/duplex link.\n"); 1722 e_dbg("Waiting for forced speed/duplex link.\n");
1724 mii_status_reg = 0; 1723 mii_status_reg = 0;
1725 1724
1726 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 1725 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1746 /* We didn't get link. Reset the DSP and wait again for link. */ 1745 /* We didn't get link. Reset the DSP and wait again for link. */
1747 ret_val = e1000_phy_reset_dsp(hw); 1746 ret_val = e1000_phy_reset_dsp(hw);
1748 if (ret_val) { 1747 if (ret_val) {
1749 DEBUGOUT("Error Resetting PHY DSP\n"); 1748 e_dbg("Error Resetting PHY DSP\n");
1750 return ret_val; 1749 return ret_val;
1751 } 1750 }
1752 } 1751 }
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
1826{ 1825{
1827 u32 tctl, coll_dist; 1826 u32 tctl, coll_dist;
1828 1827
1829 DEBUGFUNC("e1000_config_collision_dist"); 1828 e_dbg("e1000_config_collision_dist");
1830 1829
1831 if (hw->mac_type < e1000_82543) 1830 if (hw->mac_type < e1000_82543)
1832 coll_dist = E1000_COLLISION_DISTANCE_82542; 1831 coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1857 s32 ret_val; 1856 s32 ret_val;
1858 u16 phy_data; 1857 u16 phy_data;
1859 1858
1860 DEBUGFUNC("e1000_config_mac_to_phy"); 1859 e_dbg("e1000_config_mac_to_phy");
1861 1860
1862 /* 82544 or newer MAC, Auto Speed Detection takes care of 1861 /* 82544 or newer MAC, Auto Speed Detection takes care of
1863 * MAC speed/duplex configuration.*/ 1862 * MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
1913{ 1912{
1914 u32 ctrl; 1913 u32 ctrl;
1915 1914
1916 DEBUGFUNC("e1000_force_mac_fc"); 1915 e_dbg("e1000_force_mac_fc");
1917 1916
1918 /* Get the current configuration of the Device Control Register */ 1917 /* Get the current configuration of the Device Control Register */
1919 ctrl = er32(CTRL); 1918 ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
1952 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 1951 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
1953 break; 1952 break;
1954 default: 1953 default:
1955 DEBUGOUT("Flow control param set incorrectly\n"); 1954 e_dbg("Flow control param set incorrectly\n");
1956 return -E1000_ERR_CONFIG; 1955 return -E1000_ERR_CONFIG;
1957 } 1956 }
1958 1957
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
1984 u16 speed; 1983 u16 speed;
1985 u16 duplex; 1984 u16 duplex;
1986 1985
1987 DEBUGFUNC("e1000_config_fc_after_link_up"); 1986 e_dbg("e1000_config_fc_after_link_up");
1988 1987
1989 /* Check for the case where we have fiber media and auto-neg failed 1988 /* Check for the case where we have fiber media and auto-neg failed
1990 * so we had to force link. In this case, we need to force the 1989 * so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
1997 && (!hw->autoneg))) { 1996 && (!hw->autoneg))) {
1998 ret_val = e1000_force_mac_fc(hw); 1997 ret_val = e1000_force_mac_fc(hw);
1999 if (ret_val) { 1998 if (ret_val) {
2000 DEBUGOUT("Error forcing flow control settings\n"); 1999 e_dbg("Error forcing flow control settings\n");
2001 return ret_val; 2000 return ret_val;
2002 } 2001 }
2003 } 2002 }
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2079 */ 2078 */
2080 if (hw->original_fc == E1000_FC_FULL) { 2079 if (hw->original_fc == E1000_FC_FULL) {
2081 hw->fc = E1000_FC_FULL; 2080 hw->fc = E1000_FC_FULL;
2082 DEBUGOUT("Flow Control = FULL.\n"); 2081 e_dbg("Flow Control = FULL.\n");
2083 } else { 2082 } else {
2084 hw->fc = E1000_FC_RX_PAUSE; 2083 hw->fc = E1000_FC_RX_PAUSE;
2085 DEBUGOUT 2084 e_dbg
2086 ("Flow Control = RX PAUSE frames only.\n"); 2085 ("Flow Control = RX PAUSE frames only.\n");
2087 } 2086 }
2088 } 2087 }
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2100 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) 2099 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2101 { 2100 {
2102 hw->fc = E1000_FC_TX_PAUSE; 2101 hw->fc = E1000_FC_TX_PAUSE;
2103 DEBUGOUT 2102 e_dbg
2104 ("Flow Control = TX PAUSE frames only.\n"); 2103 ("Flow Control = TX PAUSE frames only.\n");
2105 } 2104 }
2106 /* For transmitting PAUSE frames ONLY. 2105 /* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2117 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) 2116 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2118 { 2117 {
2119 hw->fc = E1000_FC_RX_PAUSE; 2118 hw->fc = E1000_FC_RX_PAUSE;
2120 DEBUGOUT 2119 e_dbg
2121 ("Flow Control = RX PAUSE frames only.\n"); 2120 ("Flow Control = RX PAUSE frames only.\n");
2122 } 2121 }
2123 /* Per the IEEE spec, at this point flow control should be 2122 /* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2144 hw->original_fc == E1000_FC_TX_PAUSE) || 2143 hw->original_fc == E1000_FC_TX_PAUSE) ||
2145 hw->fc_strict_ieee) { 2144 hw->fc_strict_ieee) {
2146 hw->fc = E1000_FC_NONE; 2145 hw->fc = E1000_FC_NONE;
2147 DEBUGOUT("Flow Control = NONE.\n"); 2146 e_dbg("Flow Control = NONE.\n");
2148 } else { 2147 } else {
2149 hw->fc = E1000_FC_RX_PAUSE; 2148 hw->fc = E1000_FC_RX_PAUSE;
2150 DEBUGOUT 2149 e_dbg
2151 ("Flow Control = RX PAUSE frames only.\n"); 2150 ("Flow Control = RX PAUSE frames only.\n");
2152 } 2151 }
2153 2152
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2158 ret_val = 2157 ret_val =
2159 e1000_get_speed_and_duplex(hw, &speed, &duplex); 2158 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2160 if (ret_val) { 2159 if (ret_val) {
2161 DEBUGOUT 2160 e_dbg
2162 ("Error getting link speed and duplex\n"); 2161 ("Error getting link speed and duplex\n");
2163 return ret_val; 2162 return ret_val;
2164 } 2163 }
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2171 */ 2170 */
2172 ret_val = e1000_force_mac_fc(hw); 2171 ret_val = e1000_force_mac_fc(hw);
2173 if (ret_val) { 2172 if (ret_val) {
2174 DEBUGOUT 2173 e_dbg
2175 ("Error forcing flow control settings\n"); 2174 ("Error forcing flow control settings\n");
2176 return ret_val; 2175 return ret_val;
2177 } 2176 }
2178 } else { 2177 } else {
2179 DEBUGOUT 2178 e_dbg
2180 ("Copper PHY and Auto Neg has not completed.\n"); 2179 ("Copper PHY and Auto Neg has not completed.\n");
2181 } 2180 }
2182 } 2181 }
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2197 u32 status; 2196 u32 status;
2198 s32 ret_val = E1000_SUCCESS; 2197 s32 ret_val = E1000_SUCCESS;
2199 2198
2200 DEBUGFUNC("e1000_check_for_serdes_link_generic"); 2199 e_dbg("e1000_check_for_serdes_link_generic");
2201 2200
2202 ctrl = er32(CTRL); 2201 ctrl = er32(CTRL);
2203 status = er32(STATUS); 2202 status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2216 hw->autoneg_failed = 1; 2215 hw->autoneg_failed = 1;
2217 goto out; 2216 goto out;
2218 } 2217 }
2219 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); 2218 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
2220 2219
2221 /* Disable auto-negotiation in the TXCW register */ 2220 /* Disable auto-negotiation in the TXCW register */
2222 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); 2221 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2229 /* Configure Flow Control after forcing link up. */ 2228 /* Configure Flow Control after forcing link up. */
2230 ret_val = e1000_config_fc_after_link_up(hw); 2229 ret_val = e1000_config_fc_after_link_up(hw);
2231 if (ret_val) { 2230 if (ret_val) {
2232 DEBUGOUT("Error configuring flow control\n"); 2231 e_dbg("Error configuring flow control\n");
2233 goto out; 2232 goto out;
2234 } 2233 }
2235 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 2234 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2239 * and disable forced link in the Device Control register 2238 * and disable forced link in the Device Control register
2240 * in an attempt to auto-negotiate with our link partner. 2239 * in an attempt to auto-negotiate with our link partner.
2241 */ 2240 */
2242 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); 2241 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
2243 ew32(TXCW, hw->txcw); 2242 ew32(TXCW, hw->txcw);
2244 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 2243 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
2245 2244
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2256 if (rxcw & E1000_RXCW_SYNCH) { 2255 if (rxcw & E1000_RXCW_SYNCH) {
2257 if (!(rxcw & E1000_RXCW_IV)) { 2256 if (!(rxcw & E1000_RXCW_IV)) {
2258 hw->serdes_has_link = true; 2257 hw->serdes_has_link = true;
2259 DEBUGOUT("SERDES: Link up - forced.\n"); 2258 e_dbg("SERDES: Link up - forced.\n");
2260 } 2259 }
2261 } else { 2260 } else {
2262 hw->serdes_has_link = false; 2261 hw->serdes_has_link = false;
2263 DEBUGOUT("SERDES: Link down - force failed.\n"); 2262 e_dbg("SERDES: Link down - force failed.\n");
2264 } 2263 }
2265 } 2264 }
2266 2265
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2273 if (rxcw & E1000_RXCW_SYNCH) { 2272 if (rxcw & E1000_RXCW_SYNCH) {
2274 if (!(rxcw & E1000_RXCW_IV)) { 2273 if (!(rxcw & E1000_RXCW_IV)) {
2275 hw->serdes_has_link = true; 2274 hw->serdes_has_link = true;
2276 DEBUGOUT("SERDES: Link up - autoneg " 2275 e_dbg("SERDES: Link up - autoneg "
2277 "completed successfully.\n"); 2276 "completed successfully.\n");
2278 } else { 2277 } else {
2279 hw->serdes_has_link = false; 2278 hw->serdes_has_link = false;
2280 DEBUGOUT("SERDES: Link down - invalid" 2279 e_dbg("SERDES: Link down - invalid"
2281 "codewords detected in autoneg.\n"); 2280 "codewords detected in autoneg.\n");
2282 } 2281 }
2283 } else { 2282 } else {
2284 hw->serdes_has_link = false; 2283 hw->serdes_has_link = false;
2285 DEBUGOUT("SERDES: Link down - no sync.\n"); 2284 e_dbg("SERDES: Link down - no sync.\n");
2286 } 2285 }
2287 } else { 2286 } else {
2288 hw->serdes_has_link = false; 2287 hw->serdes_has_link = false;
2289 DEBUGOUT("SERDES: Link down - autoneg failed\n"); 2288 e_dbg("SERDES: Link down - autoneg failed\n");
2290 } 2289 }
2291 } 2290 }
2292 2291
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2312 s32 ret_val; 2311 s32 ret_val;
2313 u16 phy_data; 2312 u16 phy_data;
2314 2313
2315 DEBUGFUNC("e1000_check_for_link"); 2314 e_dbg("e1000_check_for_link");
2316 2315
2317 ctrl = er32(CTRL); 2316 ctrl = er32(CTRL);
2318 status = er32(STATUS); 2317 status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2407 else { 2406 else {
2408 ret_val = e1000_config_mac_to_phy(hw); 2407 ret_val = e1000_config_mac_to_phy(hw);
2409 if (ret_val) { 2408 if (ret_val) {
2410 DEBUGOUT 2409 e_dbg
2411 ("Error configuring MAC to PHY settings\n"); 2410 ("Error configuring MAC to PHY settings\n");
2412 return ret_val; 2411 return ret_val;
2413 } 2412 }
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2419 */ 2418 */
2420 ret_val = e1000_config_fc_after_link_up(hw); 2419 ret_val = e1000_config_fc_after_link_up(hw);
2421 if (ret_val) { 2420 if (ret_val) {
2422 DEBUGOUT("Error configuring flow control\n"); 2421 e_dbg("Error configuring flow control\n");
2423 return ret_val; 2422 return ret_val;
2424 } 2423 }
2425 2424
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2435 ret_val = 2434 ret_val =
2436 e1000_get_speed_and_duplex(hw, &speed, &duplex); 2435 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2437 if (ret_val) { 2436 if (ret_val) {
2438 DEBUGOUT 2437 e_dbg
2439 ("Error getting link speed and duplex\n"); 2438 ("Error getting link speed and duplex\n");
2440 return ret_val; 2439 return ret_val;
2441 } 2440 }
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
2487 s32 ret_val; 2486 s32 ret_val;
2488 u16 phy_data; 2487 u16 phy_data;
2489 2488
2490 DEBUGFUNC("e1000_get_speed_and_duplex"); 2489 e_dbg("e1000_get_speed_and_duplex");
2491 2490
2492 if (hw->mac_type >= e1000_82543) { 2491 if (hw->mac_type >= e1000_82543) {
2493 status = er32(STATUS); 2492 status = er32(STATUS);
2494 if (status & E1000_STATUS_SPEED_1000) { 2493 if (status & E1000_STATUS_SPEED_1000) {
2495 *speed = SPEED_1000; 2494 *speed = SPEED_1000;
2496 DEBUGOUT("1000 Mbs, "); 2495 e_dbg("1000 Mbs, ");
2497 } else if (status & E1000_STATUS_SPEED_100) { 2496 } else if (status & E1000_STATUS_SPEED_100) {
2498 *speed = SPEED_100; 2497 *speed = SPEED_100;
2499 DEBUGOUT("100 Mbs, "); 2498 e_dbg("100 Mbs, ");
2500 } else { 2499 } else {
2501 *speed = SPEED_10; 2500 *speed = SPEED_10;
2502 DEBUGOUT("10 Mbs, "); 2501 e_dbg("10 Mbs, ");
2503 } 2502 }
2504 2503
2505 if (status & E1000_STATUS_FD) { 2504 if (status & E1000_STATUS_FD) {
2506 *duplex = FULL_DUPLEX; 2505 *duplex = FULL_DUPLEX;
2507 DEBUGOUT("Full Duplex\n"); 2506 e_dbg("Full Duplex\n");
2508 } else { 2507 } else {
2509 *duplex = HALF_DUPLEX; 2508 *duplex = HALF_DUPLEX;
2510 DEBUGOUT(" Half Duplex\n"); 2509 e_dbg(" Half Duplex\n");
2511 } 2510 }
2512 } else { 2511 } else {
2513 DEBUGOUT("1000 Mbs, Full Duplex\n"); 2512 e_dbg("1000 Mbs, Full Duplex\n");
2514 *speed = SPEED_1000; 2513 *speed = SPEED_1000;
2515 *duplex = FULL_DUPLEX; 2514 *duplex = FULL_DUPLEX;
2516 } 2515 }
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
2554 u16 i; 2553 u16 i;
2555 u16 phy_data; 2554 u16 phy_data;
2556 2555
2557 DEBUGFUNC("e1000_wait_autoneg"); 2556 e_dbg("e1000_wait_autoneg");
2558 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 2557 e_dbg("Waiting for Auto-Neg to complete.\n");
2559 2558
2560 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 2559 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
2561 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { 2560 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
2718{ 2717{
2719 u32 ret_val; 2718 u32 ret_val;
2720 2719
2721 DEBUGFUNC("e1000_read_phy_reg"); 2720 e_dbg("e1000_read_phy_reg");
2722 2721
2723 if ((hw->phy_type == e1000_phy_igp) && 2722 if ((hw->phy_type == e1000_phy_igp) &&
2724 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2723 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2741 u32 mdic = 0; 2740 u32 mdic = 0;
2742 const u32 phy_addr = 1; 2741 const u32 phy_addr = 1;
2743 2742
2744 DEBUGFUNC("e1000_read_phy_reg_ex"); 2743 e_dbg("e1000_read_phy_reg_ex");
2745 2744
2746 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2745 if (reg_addr > MAX_PHY_REG_ADDRESS) {
2747 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2746 e_dbg("PHY Address %d is out of range\n", reg_addr);
2748 return -E1000_ERR_PARAM; 2747 return -E1000_ERR_PARAM;
2749 } 2748 }
2750 2749
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2767 break; 2766 break;
2768 } 2767 }
2769 if (!(mdic & E1000_MDIC_READY)) { 2768 if (!(mdic & E1000_MDIC_READY)) {
2770 DEBUGOUT("MDI Read did not complete\n"); 2769 e_dbg("MDI Read did not complete\n");
2771 return -E1000_ERR_PHY; 2770 return -E1000_ERR_PHY;
2772 } 2771 }
2773 if (mdic & E1000_MDIC_ERROR) { 2772 if (mdic & E1000_MDIC_ERROR) {
2774 DEBUGOUT("MDI Error\n"); 2773 e_dbg("MDI Error\n");
2775 return -E1000_ERR_PHY; 2774 return -E1000_ERR_PHY;
2776 } 2775 }
2777 *phy_data = (u16) mdic; 2776 *phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
2820{ 2819{
2821 u32 ret_val; 2820 u32 ret_val;
2822 2821
2823 DEBUGFUNC("e1000_write_phy_reg"); 2822 e_dbg("e1000_write_phy_reg");
2824 2823
2825 if ((hw->phy_type == e1000_phy_igp) && 2824 if ((hw->phy_type == e1000_phy_igp) &&
2826 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2825 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2843 u32 mdic = 0; 2842 u32 mdic = 0;
2844 const u32 phy_addr = 1; 2843 const u32 phy_addr = 1;
2845 2844
2846 DEBUGFUNC("e1000_write_phy_reg_ex"); 2845 e_dbg("e1000_write_phy_reg_ex");
2847 2846
2848 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2847 if (reg_addr > MAX_PHY_REG_ADDRESS) {
2849 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2848 e_dbg("PHY Address %d is out of range\n", reg_addr);
2850 return -E1000_ERR_PARAM; 2849 return -E1000_ERR_PARAM;
2851 } 2850 }
2852 2851
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2870 break; 2869 break;
2871 } 2870 }
2872 if (!(mdic & E1000_MDIC_READY)) { 2871 if (!(mdic & E1000_MDIC_READY)) {
2873 DEBUGOUT("MDI Write did not complete\n"); 2872 e_dbg("MDI Write did not complete\n");
2874 return -E1000_ERR_PHY; 2873 return -E1000_ERR_PHY;
2875 } 2874 }
2876 } else { 2875 } else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
2910 u32 led_ctrl; 2909 u32 led_ctrl;
2911 s32 ret_val; 2910 s32 ret_val;
2912 2911
2913 DEBUGFUNC("e1000_phy_hw_reset"); 2912 e_dbg("e1000_phy_hw_reset");
2914 2913
2915 DEBUGOUT("Resetting Phy...\n"); 2914 e_dbg("Resetting Phy...\n");
2916 2915
2917 if (hw->mac_type > e1000_82543) { 2916 if (hw->mac_type > e1000_82543) {
2918 /* Read the device control register and assert the E1000_CTRL_PHY_RST 2917 /* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
2973 s32 ret_val; 2972 s32 ret_val;
2974 u16 phy_data; 2973 u16 phy_data;
2975 2974
2976 DEBUGFUNC("e1000_phy_reset"); 2975 e_dbg("e1000_phy_reset");
2977 2976
2978 switch (hw->phy_type) { 2977 switch (hw->phy_type) {
2979 case e1000_phy_igp: 2978 case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3013 u16 phy_id_high, phy_id_low; 3012 u16 phy_id_high, phy_id_low;
3014 bool match = false; 3013 bool match = false;
3015 3014
3016 DEBUGFUNC("e1000_detect_gig_phy"); 3015 e_dbg("e1000_detect_gig_phy");
3017 3016
3018 if (hw->phy_id != 0) 3017 if (hw->phy_id != 0)
3019 return E1000_SUCCESS; 3018 return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3057 match = true; 3056 match = true;
3058 break; 3057 break;
3059 default: 3058 default:
3060 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 3059 e_dbg("Invalid MAC type %d\n", hw->mac_type);
3061 return -E1000_ERR_CONFIG; 3060 return -E1000_ERR_CONFIG;
3062 } 3061 }
3063 phy_init_status = e1000_set_phy_type(hw); 3062 phy_init_status = e1000_set_phy_type(hw);
3064 3063
3065 if ((match) && (phy_init_status == E1000_SUCCESS)) { 3064 if ((match) && (phy_init_status == E1000_SUCCESS)) {
3066 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id); 3065 e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
3067 return E1000_SUCCESS; 3066 return E1000_SUCCESS;
3068 } 3067 }
3069 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id); 3068 e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
3070 return -E1000_ERR_PHY; 3069 return -E1000_ERR_PHY;
3071} 3070}
3072 3071
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3079static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3078static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
3080{ 3079{
3081 s32 ret_val; 3080 s32 ret_val;
3082 DEBUGFUNC("e1000_phy_reset_dsp"); 3081 e_dbg("e1000_phy_reset_dsp");
3083 3082
3084 do { 3083 do {
3085 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); 3084 ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
3111 u16 phy_data, min_length, max_length, average; 3110 u16 phy_data, min_length, max_length, average;
3112 e1000_rev_polarity polarity; 3111 e1000_rev_polarity polarity;
3113 3112
3114 DEBUGFUNC("e1000_phy_igp_get_info"); 3113 e_dbg("e1000_phy_igp_get_info");
3115 3114
3116 /* The downshift status is checked only once, after link is established, 3115 /* The downshift status is checked only once, after link is established,
3117 * and it stored in the hw->speed_downgraded parameter. */ 3116 * and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
3189 u16 phy_data; 3188 u16 phy_data;
3190 e1000_rev_polarity polarity; 3189 e1000_rev_polarity polarity;
3191 3190
3192 DEBUGFUNC("e1000_phy_m88_get_info"); 3191 e_dbg("e1000_phy_m88_get_info");
3193 3192
3194 /* The downshift status is checked only once, after link is established, 3193 /* The downshift status is checked only once, after link is established,
3195 * and it stored in the hw->speed_downgraded parameter. */ 3194 * and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3261 s32 ret_val; 3260 s32 ret_val;
3262 u16 phy_data; 3261 u16 phy_data;
3263 3262
3264 DEBUGFUNC("e1000_phy_get_info"); 3263 e_dbg("e1000_phy_get_info");
3265 3264
3266 phy_info->cable_length = e1000_cable_length_undefined; 3265 phy_info->cable_length = e1000_cable_length_undefined;
3267 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3266 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3273 phy_info->remote_rx = e1000_1000t_rx_status_undefined; 3272 phy_info->remote_rx = e1000_1000t_rx_status_undefined;
3274 3273
3275 if (hw->media_type != e1000_media_type_copper) { 3274 if (hw->media_type != e1000_media_type_copper) {
3276 DEBUGOUT("PHY info is only valid for copper media\n"); 3275 e_dbg("PHY info is only valid for copper media\n");
3277 return -E1000_ERR_CONFIG; 3276 return -E1000_ERR_CONFIG;
3278 } 3277 }
3279 3278
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3286 return ret_val; 3285 return ret_val;
3287 3286
3288 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { 3287 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
3289 DEBUGOUT("PHY info is only valid if link is up\n"); 3288 e_dbg("PHY info is only valid if link is up\n");
3290 return -E1000_ERR_CONFIG; 3289 return -E1000_ERR_CONFIG;
3291 } 3290 }
3292 3291
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3298 3297
3299s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3298s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
3300{ 3299{
3301 DEBUGFUNC("e1000_validate_mdi_settings"); 3300 e_dbg("e1000_validate_mdi_settings");
3302 3301
3303 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3302 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
3304 DEBUGOUT("Invalid MDI setting detected\n"); 3303 e_dbg("Invalid MDI setting detected\n");
3305 hw->mdix = 1; 3304 hw->mdix = 1;
3306 return -E1000_ERR_CONFIG; 3305 return -E1000_ERR_CONFIG;
3307 } 3306 }
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
3322 s32 ret_val = E1000_SUCCESS; 3321 s32 ret_val = E1000_SUCCESS;
3323 u16 eeprom_size; 3322 u16 eeprom_size;
3324 3323
3325 DEBUGFUNC("e1000_init_eeprom_params"); 3324 e_dbg("e1000_init_eeprom_params");
3326 3325
3327 switch (hw->mac_type) { 3326 switch (hw->mac_type) {
3328 case e1000_82542_rev2_0: 3327 case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3539 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3538 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3540 u32 eecd, i = 0; 3539 u32 eecd, i = 0;
3541 3540
3542 DEBUGFUNC("e1000_acquire_eeprom"); 3541 e_dbg("e1000_acquire_eeprom");
3543 3542
3544 eecd = er32(EECD); 3543 eecd = er32(EECD);
3545 3544
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3557 if (!(eecd & E1000_EECD_GNT)) { 3556 if (!(eecd & E1000_EECD_GNT)) {
3558 eecd &= ~E1000_EECD_REQ; 3557 eecd &= ~E1000_EECD_REQ;
3559 ew32(EECD, eecd); 3558 ew32(EECD, eecd);
3560 DEBUGOUT("Could not acquire EEPROM grant\n"); 3559 e_dbg("Could not acquire EEPROM grant\n");
3561 return -E1000_ERR_EEPROM; 3560 return -E1000_ERR_EEPROM;
3562 } 3561 }
3563 } 3562 }
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
3639{ 3638{
3640 u32 eecd; 3639 u32 eecd;
3641 3640
3642 DEBUGFUNC("e1000_release_eeprom"); 3641 e_dbg("e1000_release_eeprom");
3643 3642
3644 eecd = er32(EECD); 3643 eecd = er32(EECD);
3645 3644
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
3687 u16 retry_count = 0; 3686 u16 retry_count = 0;
3688 u8 spi_stat_reg; 3687 u8 spi_stat_reg;
3689 3688
3690 DEBUGFUNC("e1000_spi_eeprom_ready"); 3689 e_dbg("e1000_spi_eeprom_ready");
3691 3690
3692 /* Read "Status Register" repeatedly until the LSB is cleared. The 3691 /* Read "Status Register" repeatedly until the LSB is cleared. The
3693 * EEPROM will signal that the command has been completed by clearing 3692 * EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
3712 * only 0-5mSec on 5V devices) 3711 * only 0-5mSec on 5V devices)
3713 */ 3712 */
3714 if (retry_count >= EEPROM_MAX_RETRY_SPI) { 3713 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
3715 DEBUGOUT("SPI EEPROM Status error\n"); 3714 e_dbg("SPI EEPROM Status error\n");
3716 return -E1000_ERR_EEPROM; 3715 return -E1000_ERR_EEPROM;
3717 } 3716 }
3718 3717
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3741 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3740 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3742 u32 i = 0; 3741 u32 i = 0;
3743 3742
3744 DEBUGFUNC("e1000_read_eeprom"); 3743 e_dbg("e1000_read_eeprom");
3745 3744
3746 /* If eeprom is not yet detected, do so now */ 3745 /* If eeprom is not yet detected, do so now */
3747 if (eeprom->word_size == 0) 3746 if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3752 */ 3751 */
3753 if ((offset >= eeprom->word_size) 3752 if ((offset >= eeprom->word_size)
3754 || (words > eeprom->word_size - offset) || (words == 0)) { 3753 || (words > eeprom->word_size - offset) || (words == 0)) {
3755 DEBUGOUT2 3754 e_dbg("\"words\" parameter out of bounds. Words = %d,"
3756 ("\"words\" parameter out of bounds. Words = %d, size = %d\n", 3755 "size = %d\n", offset, eeprom->word_size);
3757 offset, eeprom->word_size);
3758 return -E1000_ERR_EEPROM; 3756 return -E1000_ERR_EEPROM;
3759 } 3757 }
3760 3758
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3832 u16 checksum = 0; 3830 u16 checksum = 0;
3833 u16 i, eeprom_data; 3831 u16 i, eeprom_data;
3834 3832
3835 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3833 e_dbg("e1000_validate_eeprom_checksum");
3836 3834
3837 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3835 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
3838 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3836 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
3839 DEBUGOUT("EEPROM Read Error\n"); 3837 e_dbg("EEPROM Read Error\n");
3840 return -E1000_ERR_EEPROM; 3838 return -E1000_ERR_EEPROM;
3841 } 3839 }
3842 checksum += eeprom_data; 3840 checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3845 if (checksum == (u16) EEPROM_SUM) 3843 if (checksum == (u16) EEPROM_SUM)
3846 return E1000_SUCCESS; 3844 return E1000_SUCCESS;
3847 else { 3845 else {
3848 DEBUGOUT("EEPROM Checksum Invalid\n"); 3846 e_dbg("EEPROM Checksum Invalid\n");
3849 return -E1000_ERR_EEPROM; 3847 return -E1000_ERR_EEPROM;
3850 } 3848 }
3851} 3849}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
3862 u16 checksum = 0; 3860 u16 checksum = 0;
3863 u16 i, eeprom_data; 3861 u16 i, eeprom_data;
3864 3862
3865 DEBUGFUNC("e1000_update_eeprom_checksum"); 3863 e_dbg("e1000_update_eeprom_checksum");
3866 3864
3867 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 3865 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
3868 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3866 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
3869 DEBUGOUT("EEPROM Read Error\n"); 3867 e_dbg("EEPROM Read Error\n");
3870 return -E1000_ERR_EEPROM; 3868 return -E1000_ERR_EEPROM;
3871 } 3869 }
3872 checksum += eeprom_data; 3870 checksum += eeprom_data;
3873 } 3871 }
3874 checksum = (u16) EEPROM_SUM - checksum; 3872 checksum = (u16) EEPROM_SUM - checksum;
3875 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3873 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
3876 DEBUGOUT("EEPROM Write Error\n"); 3874 e_dbg("EEPROM Write Error\n");
3877 return -E1000_ERR_EEPROM; 3875 return -E1000_ERR_EEPROM;
3878 } 3876 }
3879 return E1000_SUCCESS; 3877 return E1000_SUCCESS;
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3904 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3902 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3905 s32 status = 0; 3903 s32 status = 0;
3906 3904
3907 DEBUGFUNC("e1000_write_eeprom"); 3905 e_dbg("e1000_write_eeprom");
3908 3906
3909 /* If eeprom is not yet detected, do so now */ 3907 /* If eeprom is not yet detected, do so now */
3910 if (eeprom->word_size == 0) 3908 if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3915 */ 3913 */
3916 if ((offset >= eeprom->word_size) 3914 if ((offset >= eeprom->word_size)
3917 || (words > eeprom->word_size - offset) || (words == 0)) { 3915 || (words > eeprom->word_size - offset) || (words == 0)) {
3918 DEBUGOUT("\"words\" parameter out of bounds\n"); 3916 e_dbg("\"words\" parameter out of bounds\n");
3919 return -E1000_ERR_EEPROM; 3917 return -E1000_ERR_EEPROM;
3920 } 3918 }
3921 3919
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
3949 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3947 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3950 u16 widx = 0; 3948 u16 widx = 0;
3951 3949
3952 DEBUGFUNC("e1000_write_eeprom_spi"); 3950 e_dbg("e1000_write_eeprom_spi");
3953 3951
3954 while (widx < words) { 3952 while (widx < words) {
3955 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; 3953 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4013 u16 words_written = 0; 4011 u16 words_written = 0;
4014 u16 i = 0; 4012 u16 i = 0;
4015 4013
4016 DEBUGFUNC("e1000_write_eeprom_microwire"); 4014 e_dbg("e1000_write_eeprom_microwire");
4017 4015
4018 /* Send the write enable command to the EEPROM (3-bit opcode plus 4016 /* Send the write enable command to the EEPROM (3-bit opcode plus
4019 * 6/8-bit dummy address beginning with 11). It's less work to include 4017 * 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4056 udelay(50); 4054 udelay(50);
4057 } 4055 }
4058 if (i == 200) { 4056 if (i == 200) {
4059 DEBUGOUT("EEPROM Write did not complete\n"); 4057 e_dbg("EEPROM Write did not complete\n");
4060 return -E1000_ERR_EEPROM; 4058 return -E1000_ERR_EEPROM;
4061 } 4059 }
4062 4060
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
4092 u16 offset; 4090 u16 offset;
4093 u16 eeprom_data, i; 4091 u16 eeprom_data, i;
4094 4092
4095 DEBUGFUNC("e1000_read_mac_addr"); 4093 e_dbg("e1000_read_mac_addr");
4096 4094
4097 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4095 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
4098 offset = i >> 1; 4096 offset = i >> 1;
4099 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { 4097 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
4100 DEBUGOUT("EEPROM Read Error\n"); 4098 e_dbg("EEPROM Read Error\n");
4101 return -E1000_ERR_EEPROM; 4099 return -E1000_ERR_EEPROM;
4102 } 4100 }
4103 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); 4101 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
4132 u32 i; 4130 u32 i;
4133 u32 rar_num; 4131 u32 rar_num;
4134 4132
4135 DEBUGFUNC("e1000_init_rx_addrs"); 4133 e_dbg("e1000_init_rx_addrs");
4136 4134
4137 /* Setup the receive address. */ 4135 /* Setup the receive address. */
4138 DEBUGOUT("Programming MAC Address into RAR[0]\n"); 4136 e_dbg("Programming MAC Address into RAR[0]\n");
4139 4137
4140 e1000_rar_set(hw, hw->mac_addr, 0); 4138 e1000_rar_set(hw, hw->mac_addr, 0);
4141 4139
4142 rar_num = E1000_RAR_ENTRIES; 4140 rar_num = E1000_RAR_ENTRIES;
4143 4141
4144 /* Zero out the other 15 receive addresses. */ 4142 /* Zero out the other 15 receive addresses. */
4145 DEBUGOUT("Clearing RAR[1-15]\n"); 4143 e_dbg("Clearing RAR[1-15]\n");
4146 for (i = 1; i < rar_num; i++) { 4144 for (i = 1; i < rar_num; i++) {
4147 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4145 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
4148 E1000_WRITE_FLUSH(); 4146 E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
4290 u16 eeprom_data, i, temp; 4288 u16 eeprom_data, i, temp;
4291 const u16 led_mask = 0x0F; 4289 const u16 led_mask = 0x0F;
4292 4290
4293 DEBUGFUNC("e1000_id_led_init"); 4291 e_dbg("e1000_id_led_init");
4294 4292
4295 if (hw->mac_type < e1000_82540) { 4293 if (hw->mac_type < e1000_82540) {
4296 /* Nothing to do */ 4294 /* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
4303 hw->ledctl_mode2 = hw->ledctl_default; 4301 hw->ledctl_mode2 = hw->ledctl_default;
4304 4302
4305 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { 4303 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
4306 DEBUGOUT("EEPROM Read Error\n"); 4304 e_dbg("EEPROM Read Error\n");
4307 return -E1000_ERR_EEPROM; 4305 return -E1000_ERR_EEPROM;
4308 } 4306 }
4309 4307
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
4363 u32 ledctl; 4361 u32 ledctl;
4364 s32 ret_val = E1000_SUCCESS; 4362 s32 ret_val = E1000_SUCCESS;
4365 4363
4366 DEBUGFUNC("e1000_setup_led"); 4364 e_dbg("e1000_setup_led");
4367 4365
4368 switch (hw->mac_type) { 4366 switch (hw->mac_type) {
4369 case e1000_82542_rev2_0: 4367 case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
4415{ 4413{
4416 s32 ret_val = E1000_SUCCESS; 4414 s32 ret_val = E1000_SUCCESS;
4417 4415
4418 DEBUGFUNC("e1000_cleanup_led"); 4416 e_dbg("e1000_cleanup_led");
4419 4417
4420 switch (hw->mac_type) { 4418 switch (hw->mac_type) {
4421 case e1000_82542_rev2_0: 4419 case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
4451{ 4449{
4452 u32 ctrl = er32(CTRL); 4450 u32 ctrl = er32(CTRL);
4453 4451
4454 DEBUGFUNC("e1000_led_on"); 4452 e_dbg("e1000_led_on");
4455 4453
4456 switch (hw->mac_type) { 4454 switch (hw->mac_type) {
4457 case e1000_82542_rev2_0: 4455 case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
4497{ 4495{
4498 u32 ctrl = er32(CTRL); 4496 u32 ctrl = er32(CTRL);
4499 4497
4500 DEBUGFUNC("e1000_led_off"); 4498 e_dbg("e1000_led_off");
4501 4499
4502 switch (hw->mac_type) { 4500 switch (hw->mac_type) {
4503 case e1000_82542_rev2_0: 4501 case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
4626 */ 4624 */
4627void e1000_reset_adaptive(struct e1000_hw *hw) 4625void e1000_reset_adaptive(struct e1000_hw *hw)
4628{ 4626{
4629 DEBUGFUNC("e1000_reset_adaptive"); 4627 e_dbg("e1000_reset_adaptive");
4630 4628
4631 if (hw->adaptive_ifs) { 4629 if (hw->adaptive_ifs) {
4632 if (!hw->ifs_params_forced) { 4630 if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
4639 hw->in_ifs_mode = false; 4637 hw->in_ifs_mode = false;
4640 ew32(AIT, 0); 4638 ew32(AIT, 0);
4641 } else { 4639 } else {
4642 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4640 e_dbg("Not in Adaptive IFS mode!\n");
4643 } 4641 }
4644} 4642}
4645 4643
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
4654 */ 4652 */
4655void e1000_update_adaptive(struct e1000_hw *hw) 4653void e1000_update_adaptive(struct e1000_hw *hw)
4656{ 4654{
4657 DEBUGFUNC("e1000_update_adaptive"); 4655 e_dbg("e1000_update_adaptive");
4658 4656
4659 if (hw->adaptive_ifs) { 4657 if (hw->adaptive_ifs) {
4660 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4658 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
4679 } 4677 }
4680 } 4678 }
4681 } else { 4679 } else {
4682 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4680 e_dbg("Not in Adaptive IFS mode!\n");
4683 } 4681 }
4684} 4682}
4685 4683
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
4851 u16 i, phy_data; 4849 u16 i, phy_data;
4852 u16 cable_length; 4850 u16 cable_length;
4853 4851
4854 DEBUGFUNC("e1000_get_cable_length"); 4852 e_dbg("e1000_get_cable_length");
4855 4853
4856 *min_length = *max_length = 0; 4854 *min_length = *max_length = 0;
4857 4855
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
4968 s32 ret_val; 4966 s32 ret_val;
4969 u16 phy_data; 4967 u16 phy_data;
4970 4968
4971 DEBUGFUNC("e1000_check_polarity"); 4969 e_dbg("e1000_check_polarity");
4972 4970
4973 if (hw->phy_type == e1000_phy_m88) { 4971 if (hw->phy_type == e1000_phy_m88) {
4974 /* return the Polarity bit in the Status register. */ 4972 /* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
5034 s32 ret_val; 5032 s32 ret_val;
5035 u16 phy_data; 5033 u16 phy_data;
5036 5034
5037 DEBUGFUNC("e1000_check_downshift"); 5035 e_dbg("e1000_check_downshift");
5038 5036
5039 if (hw->phy_type == e1000_phy_igp) { 5037 if (hw->phy_type == e1000_phy_igp) {
5040 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5038 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5081 }; 5079 };
5082 u16 min_length, max_length; 5080 u16 min_length, max_length;
5083 5081
5084 DEBUGFUNC("e1000_config_dsp_after_link_change"); 5082 e_dbg("e1000_config_dsp_after_link_change");
5085 5083
5086 if (hw->phy_type != e1000_phy_igp) 5084 if (hw->phy_type != e1000_phy_igp)
5087 return E1000_SUCCESS; 5085 return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5089 if (link_up) { 5087 if (link_up) {
5090 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 5088 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
5091 if (ret_val) { 5089 if (ret_val) {
5092 DEBUGOUT("Error getting link speed and duplex\n"); 5090 e_dbg("Error getting link speed and duplex\n");
5093 return ret_val; 5091 return ret_val;
5094 } 5092 }
5095 5093
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
5289 s32 ret_val; 5287 s32 ret_val;
5290 u16 eeprom_data; 5288 u16 eeprom_data;
5291 5289
5292 DEBUGFUNC("e1000_set_phy_mode"); 5290 e_dbg("e1000_set_phy_mode");
5293 5291
5294 if ((hw->mac_type == e1000_82545_rev_3) && 5292 if ((hw->mac_type == e1000_82545_rev_3) &&
5295 (hw->media_type == e1000_media_type_copper)) { 5293 (hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
5337{ 5335{
5338 s32 ret_val; 5336 s32 ret_val;
5339 u16 phy_data; 5337 u16 phy_data;
5340 DEBUGFUNC("e1000_set_d3_lplu_state"); 5338 e_dbg("e1000_set_d3_lplu_state");
5341 5339
5342 if (hw->phy_type != e1000_phy_igp) 5340 if (hw->phy_type != e1000_phy_igp)
5343 return E1000_SUCCESS; 5341 return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
5440 u16 default_page = 0; 5438 u16 default_page = 0;
5441 u16 phy_data; 5439 u16 phy_data;
5442 5440
5443 DEBUGFUNC("e1000_set_vco_speed"); 5441 e_dbg("e1000_set_vco_speed");
5444 5442
5445 switch (hw->mac_type) { 5443 switch (hw->mac_type) {
5446 case e1000_82545_rev_3: 5444 case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
5613 */ 5611 */
5614static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5612static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5615{ 5613{
5616 DEBUGFUNC("e1000_get_auto_rd_done"); 5614 e_dbg("e1000_get_auto_rd_done");
5617 msleep(5); 5615 msleep(5);
5618 return E1000_SUCCESS; 5616 return E1000_SUCCESS;
5619} 5617}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5628 */ 5626 */
5629static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5627static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
5630{ 5628{
5631 DEBUGFUNC("e1000_get_phy_cfg_done"); 5629 e_dbg("e1000_get_phy_cfg_done");
5632 mdelay(10); 5630 mdelay(10);
5633 return E1000_SUCCESS; 5631 return E1000_SUCCESS;
5634} 5632}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb0dafb..ecd9f6c6bcd5 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
35 35
36#include "e1000_osdep.h" 36#include "e1000_osdep.h"
37 37
38
38/* Forward declarations of structures used by the shared code */ 39/* Forward declarations of structures used by the shared code */
39struct e1000_hw; 40struct e1000_hw;
40struct e1000_hw_stats; 41struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 974a02d81823..4dd2c23775cb 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k5-NAPI" 34#define DRV_VERSION "7.3.21-k6-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
214MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 214MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
215 215
216/** 216/**
217 * e1000_get_hw_dev - return device
218 * used by hardware layer to print debugging information
219 *
220 **/
221struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
222{
223 struct e1000_adapter *adapter = hw->back;
224 return adapter->netdev;
225}
226
227/**
217 * e1000_init_module - Driver Registration Routine 228 * e1000_init_module - Driver Registration Routine
218 * 229 *
219 * e1000_init_module is the first routine called when the driver is 230 * e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
223static int __init e1000_init_module(void) 234static int __init e1000_init_module(void)
224{ 235{
225 int ret; 236 int ret;
226 printk(KERN_INFO "%s - version %s\n", 237 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
227 e1000_driver_string, e1000_driver_version);
228 238
229 printk(KERN_INFO "%s\n", e1000_copyright); 239 pr_info("%s\n", e1000_copyright);
230 240
231 ret = pci_register_driver(&e1000_driver); 241 ret = pci_register_driver(&e1000_driver);
232 if (copybreak != COPYBREAK_DEFAULT) { 242 if (copybreak != COPYBREAK_DEFAULT) {
233 if (copybreak == 0) 243 if (copybreak == 0)
234 printk(KERN_INFO "e1000: copybreak disabled\n"); 244 pr_info("copybreak disabled\n");
235 else 245 else
236 printk(KERN_INFO "e1000: copybreak enabled for " 246 pr_info("copybreak enabled for "
237 "packets <= %u bytes\n", copybreak); 247 "packets <= %u bytes\n", copybreak);
238 } 248 }
239 return ret; 249 return ret;
240} 250}
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
265 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
266 netdev); 276 netdev);
267 if (err) { 277 if (err) {
268 DPRINTK(PROBE, ERR, 278 e_err("Unable to allocate interrupt Error: %d\n", err);
269 "Unable to allocate interrupt Error: %d\n", err);
270 } 279 }
271 280
272 return err; 281 return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
648 ew32(WUC, 0); 657 ew32(WUC, 0);
649 658
650 if (e1000_init_hw(hw)) 659 if (e1000_init_hw(hw))
651 DPRINTK(PROBE, ERR, "Hardware Error\n"); 660 e_err("Hardware Error\n");
652 e1000_update_mng_vlan(adapter); 661 e1000_update_mng_vlan(adapter);
653 662
654 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 663 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
689 698
690 data = kmalloc(eeprom.len, GFP_KERNEL); 699 data = kmalloc(eeprom.len, GFP_KERNEL);
691 if (!data) { 700 if (!data) {
692 printk(KERN_ERR "Unable to allocate memory to dump EEPROM" 701 pr_err("Unable to allocate memory to dump EEPROM data\n");
693 " data\n");
694 return; 702 return;
695 } 703 }
696 704
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
702 csum_new += data[i] + (data[i + 1] << 8); 710 csum_new += data[i] + (data[i + 1] << 8);
703 csum_new = EEPROM_SUM - csum_new; 711 csum_new = EEPROM_SUM - csum_new;
704 712
705 printk(KERN_ERR "/*********************/\n"); 713 pr_err("/*********************/\n");
706 printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old); 714 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
707 printk(KERN_ERR "Calculated : 0x%04x\n", csum_new); 715 pr_err("Calculated : 0x%04x\n", csum_new);
708 716
709 printk(KERN_ERR "Offset Values\n"); 717 pr_err("Offset Values\n");
710 printk(KERN_ERR "======== ======\n"); 718 pr_err("======== ======\n");
711 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); 719 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
712 720
713 printk(KERN_ERR "Include this output when contacting your support " 721 pr_err("Include this output when contacting your support provider.\n");
714 "provider.\n"); 722 pr_err("This is not a software error! Something bad happened to\n");
715 printk(KERN_ERR "This is not a software error! Something bad " 723 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
716 "happened to your hardware or\n"); 724 pr_err("result in further problems, possibly loss of data,\n");
717 printk(KERN_ERR "EEPROM image. Ignoring this " 725 pr_err("corruption or system hangs!\n");
718 "problem could result in further problems,\n"); 726 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
719 printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n"); 727 pr_err("which is invalid and requires you to set the proper MAC\n");
720 printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, " 728 pr_err("address manually before continuing to enable this network\n");
721 "which is invalid\n"); 729 pr_err("device. Please inspect the EEPROM dump and report the\n");
722 printk(KERN_ERR "and requires you to set the proper MAC " 730 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
723 "address manually before continuing\n"); 731 pr_err("/*********************/\n");
724 printk(KERN_ERR "to enable this network device.\n");
725 printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
726 "to your hardware vendor\n");
727 printk(KERN_ERR "or Intel Customer Support.\n");
728 printk(KERN_ERR "/*********************/\n");
729 732
730 kfree(data); 733 kfree(data);
731} 734}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
823 if (err) 826 if (err)
824 return err; 827 return err;
825 828
826 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 829 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
827 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 830 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
828 pci_using_dac = 1; 831 pci_using_dac = 1;
829 } else { 832 } else {
830 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 833 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
831 if (err) { 834 if (err) {
832 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 835 err = dma_set_coherent_mask(&pdev->dev,
836 DMA_BIT_MASK(32));
833 if (err) { 837 if (err) {
834 E1000_ERR("No usable DMA configuration, " 838 pr_err("No usable DMA config, aborting\n");
835 "aborting\n");
836 goto err_dma; 839 goto err_dma;
837 } 840 }
838 } 841 }
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
922 925
923 /* initialize eeprom parameters */ 926 /* initialize eeprom parameters */
924 if (e1000_init_eeprom_params(hw)) { 927 if (e1000_init_eeprom_params(hw)) {
925 E1000_ERR("EEPROM initialization failed\n"); 928 e_err("EEPROM initialization failed\n");
926 goto err_eeprom; 929 goto err_eeprom;
927 } 930 }
928 931
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
933 936
934 /* make sure the EEPROM is good */ 937 /* make sure the EEPROM is good */
935 if (e1000_validate_eeprom_checksum(hw) < 0) { 938 if (e1000_validate_eeprom_checksum(hw) < 0) {
936 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 939 e_err("The EEPROM Checksum Is Not Valid\n");
937 e1000_dump_eeprom(adapter); 940 e1000_dump_eeprom(adapter);
938 /* 941 /*
939 * set MAC address to all zeroes to invalidate and temporary 942 * set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
947 } else { 950 } else {
948 /* copy the MAC address out of the EEPROM */ 951 /* copy the MAC address out of the EEPROM */
949 if (e1000_read_mac_addr(hw)) 952 if (e1000_read_mac_addr(hw))
950 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 953 e_err("EEPROM Read Error\n");
951 } 954 }
952 /* don't block initalization here due to bad MAC address */ 955 /* don't block initalization here due to bad MAC address */
953 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); 956 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
954 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); 957 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
955 958
956 if (!is_valid_ether_addr(netdev->perm_addr)) 959 if (!is_valid_ether_addr(netdev->perm_addr))
957 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 960 e_err("Invalid MAC Address\n");
958 961
959 e1000_get_bus_info(hw); 962 e1000_get_bus_info(hw);
960 963
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1035 adapter->wol = adapter->eeprom_wol; 1038 adapter->wol = adapter->eeprom_wol;
1036 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1039 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1037 1040
1041 /* reset the hardware with the new settings */
1042 e1000_reset(adapter);
1043
1044 strcpy(netdev->name, "eth%d");
1045 err = register_netdev(netdev);
1046 if (err)
1047 goto err_register;
1048
1038 /* print bus type/speed/width info */ 1049 /* print bus type/speed/width info */
1039 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1050 e_info("(PCI%s:%s:%s) ",
1040 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1041 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : 1052 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1042 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : 1053 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1044 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), 1055 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1045 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit")); 1056 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
1046 1057
1047 printk("%pM\n", netdev->dev_addr); 1058 e_info("%pM\n", netdev->dev_addr);
1048
1049 /* reset the hardware with the new settings */
1050 e1000_reset(adapter);
1051
1052 strcpy(netdev->name, "eth%d");
1053 err = register_netdev(netdev);
1054 if (err)
1055 goto err_register;
1056 1059
1057 /* carrier off reporting is important to ethtool even BEFORE open */ 1060 /* carrier off reporting is important to ethtool even BEFORE open */
1058 netif_carrier_off(netdev); 1061 netif_carrier_off(netdev);
1059 1062
1060 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 1063 e_info("Intel(R) PRO/1000 Network Connection\n");
1061 1064
1062 cards_found++; 1065 cards_found++;
1063 return 0; 1066 return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1157 /* identify the MAC */ 1160 /* identify the MAC */
1158 1161
1159 if (e1000_set_mac_type(hw)) { 1162 if (e1000_set_mac_type(hw)) {
1160 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1163 e_err("Unknown MAC Type\n");
1161 return -EIO; 1164 return -EIO;
1162 } 1165 }
1163 1166
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1190 adapter->num_rx_queues = 1; 1193 adapter->num_rx_queues = 1;
1191 1194
1192 if (e1000_alloc_queues(adapter)) { 1195 if (e1000_alloc_queues(adapter)) {
1193 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 1196 e_err("Unable to allocate memory for queues\n");
1194 return -ENOMEM; 1197 return -ENOMEM;
1195 } 1198 }
1196 1199
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1384 size = sizeof(struct e1000_buffer) * txdr->count; 1387 size = sizeof(struct e1000_buffer) * txdr->count;
1385 txdr->buffer_info = vmalloc(size); 1388 txdr->buffer_info = vmalloc(size);
1386 if (!txdr->buffer_info) { 1389 if (!txdr->buffer_info) {
1387 DPRINTK(PROBE, ERR, 1390 e_err("Unable to allocate memory for the Tx descriptor ring\n");
1388 "Unable to allocate memory for the transmit descriptor ring\n");
1389 return -ENOMEM; 1391 return -ENOMEM;
1390 } 1392 }
1391 memset(txdr->buffer_info, 0, size); 1393 memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1395 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1397 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1396 txdr->size = ALIGN(txdr->size, 4096); 1398 txdr->size = ALIGN(txdr->size, 4096);
1397 1399
1398 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1400 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1401 GFP_KERNEL);
1399 if (!txdr->desc) { 1402 if (!txdr->desc) {
1400setup_tx_desc_die: 1403setup_tx_desc_die:
1401 vfree(txdr->buffer_info); 1404 vfree(txdr->buffer_info);
1402 DPRINTK(PROBE, ERR, 1405 e_err("Unable to allocate memory for the Tx descriptor ring\n");
1403 "Unable to allocate memory for the transmit descriptor ring\n");
1404 return -ENOMEM; 1406 return -ENOMEM;
1405 } 1407 }
1406 1408
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
1408 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1410 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1409 void *olddesc = txdr->desc; 1411 void *olddesc = txdr->desc;
1410 dma_addr_t olddma = txdr->dma; 1412 dma_addr_t olddma = txdr->dma;
1411 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " 1413 e_err("txdr align check failed: %u bytes at %p\n",
1412 "at %p\n", txdr->size, txdr->desc); 1414 txdr->size, txdr->desc);
1413 /* Try again, without freeing the previous */ 1415 /* Try again, without freeing the previous */
1414 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1416 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1417 &txdr->dma, GFP_KERNEL);
1415 /* Failed allocation, critical failure */ 1418 /* Failed allocation, critical failure */
1416 if (!txdr->desc) { 1419 if (!txdr->desc) {
1417 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1420 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1421 olddma);
1418 goto setup_tx_desc_die; 1422 goto setup_tx_desc_die;
1419 } 1423 }
1420 1424
1421 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1425 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1422 /* give up */ 1426 /* give up */
1423 pci_free_consistent(pdev, txdr->size, txdr->desc, 1427 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1424 txdr->dma); 1428 txdr->dma);
1425 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1429 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1426 DPRINTK(PROBE, ERR, 1430 olddma);
1427 "Unable to allocate aligned memory " 1431 e_err("Unable to allocate aligned memory "
1428 "for the transmit descriptor ring\n"); 1432 "for the transmit descriptor ring\n");
1429 vfree(txdr->buffer_info); 1433 vfree(txdr->buffer_info);
1430 return -ENOMEM; 1434 return -ENOMEM;
1431 } else { 1435 } else {
1432 /* Free old allocation, new allocation was successful */ 1436 /* Free old allocation, new allocation was successful */
1433 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1437 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1438 olddma);
1434 } 1439 }
1435 } 1440 }
1436 memset(txdr->desc, 0, txdr->size); 1441 memset(txdr->desc, 0, txdr->size);
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1456 for (i = 0; i < adapter->num_tx_queues; i++) { 1461 for (i = 0; i < adapter->num_tx_queues; i++) {
1457 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1462 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1458 if (err) { 1463 if (err) {
1459 DPRINTK(PROBE, ERR, 1464 e_err("Allocation for Tx Queue %u failed\n", i);
1460 "Allocation for Tx Queue %u failed\n", i);
1461 for (i-- ; i >= 0; i--) 1465 for (i-- ; i >= 0; i--)
1462 e1000_free_tx_resources(adapter, 1466 e1000_free_tx_resources(adapter,
1463 &adapter->tx_ring[i]); 1467 &adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1577 size = sizeof(struct e1000_buffer) * rxdr->count; 1581 size = sizeof(struct e1000_buffer) * rxdr->count;
1578 rxdr->buffer_info = vmalloc(size); 1582 rxdr->buffer_info = vmalloc(size);
1579 if (!rxdr->buffer_info) { 1583 if (!rxdr->buffer_info) {
1580 DPRINTK(PROBE, ERR, 1584 e_err("Unable to allocate memory for the Rx descriptor ring\n");
1581 "Unable to allocate memory for the receive descriptor ring\n");
1582 return -ENOMEM; 1585 return -ENOMEM;
1583 } 1586 }
1584 memset(rxdr->buffer_info, 0, size); 1587 memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1590 rxdr->size = rxdr->count * desc_len; 1593 rxdr->size = rxdr->count * desc_len;
1591 rxdr->size = ALIGN(rxdr->size, 4096); 1594 rxdr->size = ALIGN(rxdr->size, 4096);
1592 1595
1593 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1596 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1597 GFP_KERNEL);
1594 1598
1595 if (!rxdr->desc) { 1599 if (!rxdr->desc) {
1596 DPRINTK(PROBE, ERR, 1600 e_err("Unable to allocate memory for the Rx descriptor ring\n");
1597 "Unable to allocate memory for the receive descriptor ring\n");
1598setup_rx_desc_die: 1601setup_rx_desc_die:
1599 vfree(rxdr->buffer_info); 1602 vfree(rxdr->buffer_info);
1600 return -ENOMEM; 1603 return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
1604 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1607 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1605 void *olddesc = rxdr->desc; 1608 void *olddesc = rxdr->desc;
1606 dma_addr_t olddma = rxdr->dma; 1609 dma_addr_t olddma = rxdr->dma;
1607 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " 1610 e_err("rxdr align check failed: %u bytes at %p\n",
1608 "at %p\n", rxdr->size, rxdr->desc); 1611 rxdr->size, rxdr->desc);
1609 /* Try again, without freeing the previous */ 1612 /* Try again, without freeing the previous */
1610 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1613 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1614 &rxdr->dma, GFP_KERNEL);
1611 /* Failed allocation, critical failure */ 1615 /* Failed allocation, critical failure */
1612 if (!rxdr->desc) { 1616 if (!rxdr->desc) {
1613 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1617 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1614 DPRINTK(PROBE, ERR, 1618 olddma);
1615 "Unable to allocate memory " 1619 e_err("Unable to allocate memory for the Rx descriptor "
1616 "for the receive descriptor ring\n"); 1620 "ring\n");
1617 goto setup_rx_desc_die; 1621 goto setup_rx_desc_die;
1618 } 1622 }
1619 1623
1620 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1624 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1621 /* give up */ 1625 /* give up */
1622 pci_free_consistent(pdev, rxdr->size, rxdr->desc, 1626 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1623 rxdr->dma); 1627 rxdr->dma);
1624 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1628 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1625 DPRINTK(PROBE, ERR, 1629 olddma);
1626 "Unable to allocate aligned memory " 1630 e_err("Unable to allocate aligned memory for the Rx "
1627 "for the receive descriptor ring\n"); 1631 "descriptor ring\n");
1628 goto setup_rx_desc_die; 1632 goto setup_rx_desc_die;
1629 } else { 1633 } else {
1630 /* Free old allocation, new allocation was successful */ 1634 /* Free old allocation, new allocation was successful */
1631 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1635 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1636 olddma);
1632 } 1637 }
1633 } 1638 }
1634 memset(rxdr->desc, 0, rxdr->size); 1639 memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1655 for (i = 0; i < adapter->num_rx_queues; i++) { 1660 for (i = 0; i < adapter->num_rx_queues; i++) {
1656 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1661 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1657 if (err) { 1662 if (err) {
1658 DPRINTK(PROBE, ERR, 1663 e_err("Allocation for Rx Queue %u failed\n", i);
1659 "Allocation for Rx Queue %u failed\n", i);
1660 for (i-- ; i >= 0; i--) 1664 for (i-- ; i >= 0; i--)
1661 e1000_free_rx_resources(adapter, 1665 e1000_free_rx_resources(adapter,
1662 &adapter->rx_ring[i]); 1666 &adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1804 vfree(tx_ring->buffer_info); 1808 vfree(tx_ring->buffer_info);
1805 tx_ring->buffer_info = NULL; 1809 tx_ring->buffer_info = NULL;
1806 1810
1807 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 1811 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1812 tx_ring->dma);
1808 1813
1809 tx_ring->desc = NULL; 1814 tx_ring->desc = NULL;
1810} 1815}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1829{ 1834{
1830 if (buffer_info->dma) { 1835 if (buffer_info->dma) {
1831 if (buffer_info->mapped_as_page) 1836 if (buffer_info->mapped_as_page)
1832 pci_unmap_page(adapter->pdev, buffer_info->dma, 1837 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1833 buffer_info->length, PCI_DMA_TODEVICE); 1838 buffer_info->length, DMA_TO_DEVICE);
1834 else 1839 else
1835 pci_unmap_single(adapter->pdev, buffer_info->dma, 1840 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1836 buffer_info->length, 1841 buffer_info->length,
1837 PCI_DMA_TODEVICE); 1842 DMA_TO_DEVICE);
1838 buffer_info->dma = 0; 1843 buffer_info->dma = 0;
1839 } 1844 }
1840 if (buffer_info->skb) { 1845 if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
1912 vfree(rx_ring->buffer_info); 1917 vfree(rx_ring->buffer_info);
1913 rx_ring->buffer_info = NULL; 1918 rx_ring->buffer_info = NULL;
1914 1919
1915 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1920 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1921 rx_ring->dma);
1916 1922
1917 rx_ring->desc = NULL; 1923 rx_ring->desc = NULL;
1918} 1924}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
1952 buffer_info = &rx_ring->buffer_info[i]; 1958 buffer_info = &rx_ring->buffer_info[i];
1953 if (buffer_info->dma && 1959 if (buffer_info->dma &&
1954 adapter->clean_rx == e1000_clean_rx_irq) { 1960 adapter->clean_rx == e1000_clean_rx_irq) {
1955 pci_unmap_single(pdev, buffer_info->dma, 1961 dma_unmap_single(&pdev->dev, buffer_info->dma,
1956 buffer_info->length, 1962 buffer_info->length,
1957 PCI_DMA_FROMDEVICE); 1963 DMA_FROM_DEVICE);
1958 } else if (buffer_info->dma && 1964 } else if (buffer_info->dma &&
1959 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 1965 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
1960 pci_unmap_page(pdev, buffer_info->dma, 1966 dma_unmap_page(&pdev->dev, buffer_info->dma,
1961 buffer_info->length, 1967 buffer_info->length,
1962 PCI_DMA_FROMDEVICE); 1968 DMA_FROM_DEVICE);
1963 } 1969 }
1964 1970
1965 buffer_info->dma = 0; 1971 buffer_info->dma = 0;
@@ -2105,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2105 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2111 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2106 2112
2107 if (!mcarray) { 2113 if (!mcarray) {
2108 DPRINTK(PROBE, ERR, "memory allocation failed\n"); 2114 e_err("memory allocation failed\n");
2109 return; 2115 return;
2110 } 2116 }
2111 2117
@@ -2301,16 +2307,16 @@ static void e1000_watchdog(unsigned long data)
2301 &adapter->link_duplex); 2307 &adapter->link_duplex);
2302 2308
2303 ctrl = er32(CTRL); 2309 ctrl = er32(CTRL);
2304 printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, " 2310 pr_info("%s NIC Link is Up %d Mbps %s, "
2305 "Flow Control: %s\n", 2311 "Flow Control: %s\n",
2306 netdev->name, 2312 netdev->name,
2307 adapter->link_speed, 2313 adapter->link_speed,
2308 adapter->link_duplex == FULL_DUPLEX ? 2314 adapter->link_duplex == FULL_DUPLEX ?
2309 "Full Duplex" : "Half Duplex", 2315 "Full Duplex" : "Half Duplex",
2310 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2316 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2311 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2317 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2312 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2318 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2313 E1000_CTRL_TFCE) ? "TX" : "None" ))); 2319 E1000_CTRL_TFCE) ? "TX" : "None")));
2314 2320
2315 /* adjust timeout factor according to speed/duplex */ 2321 /* adjust timeout factor according to speed/duplex */
2316 adapter->tx_timeout_factor = 1; 2322 adapter->tx_timeout_factor = 1;
@@ -2340,8 +2346,8 @@ static void e1000_watchdog(unsigned long data)
2340 if (netif_carrier_ok(netdev)) { 2346 if (netif_carrier_ok(netdev)) {
2341 adapter->link_speed = 0; 2347 adapter->link_speed = 0;
2342 adapter->link_duplex = 0; 2348 adapter->link_duplex = 0;
2343 printk(KERN_INFO "e1000: %s NIC Link is Down\n", 2349 pr_info("%s NIC Link is Down\n",
2344 netdev->name); 2350 netdev->name);
2345 netif_carrier_off(netdev); 2351 netif_carrier_off(netdev);
2346 2352
2347 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2353 if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2380,6 +2386,22 @@ link_up:
2380 } 2386 }
2381 } 2387 }
2382 2388
2389 /* Simple mode for Interrupt Throttle Rate (ITR) */
2390 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2391 /*
2392 * Symmetric Tx/Rx gets a reduced ITR=2000;
2393 * Total asymmetrical Tx or Rx gets ITR=8000;
2394 * everyone else is between 2000-8000.
2395 */
2396 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2397 u32 dif = (adapter->gotcl > adapter->gorcl ?
2398 adapter->gotcl - adapter->gorcl :
2399 adapter->gorcl - adapter->gotcl) / 10000;
2400 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2401
2402 ew32(ITR, 1000000000 / (itr * 256));
2403 }
2404
2383 /* Cause software interrupt to ensure rx ring is cleaned */ 2405 /* Cause software interrupt to ensure rx ring is cleaned */
2384 ew32(ICS, E1000_ICS_RXDMT0); 2406 ew32(ICS, E1000_ICS_RXDMT0);
2385 2407
@@ -2631,8 +2653,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2631 break; 2653 break;
2632 default: 2654 default:
2633 if (unlikely(net_ratelimit())) 2655 if (unlikely(net_ratelimit()))
2634 DPRINTK(DRV, WARNING, 2656 e_warn("checksum_partial proto=%x!\n", skb->protocol);
2635 "checksum_partial proto=%x!\n", skb->protocol);
2636 break; 2657 break;
2637 } 2658 }
2638 2659
@@ -2714,9 +2735,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2714 /* set time_stamp *before* dma to help avoid a possible race */ 2735 /* set time_stamp *before* dma to help avoid a possible race */
2715 buffer_info->time_stamp = jiffies; 2736 buffer_info->time_stamp = jiffies;
2716 buffer_info->mapped_as_page = false; 2737 buffer_info->mapped_as_page = false;
2717 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 2738 buffer_info->dma = dma_map_single(&pdev->dev,
2718 size, PCI_DMA_TODEVICE); 2739 skb->data + offset,
2719 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2740 size, DMA_TO_DEVICE);
2741 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2720 goto dma_error; 2742 goto dma_error;
2721 buffer_info->next_to_watch = i; 2743 buffer_info->next_to_watch = i;
2722 2744
@@ -2760,10 +2782,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2760 buffer_info->length = size; 2782 buffer_info->length = size;
2761 buffer_info->time_stamp = jiffies; 2783 buffer_info->time_stamp = jiffies;
2762 buffer_info->mapped_as_page = true; 2784 buffer_info->mapped_as_page = true;
2763 buffer_info->dma = pci_map_page(pdev, frag->page, 2785 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
2764 offset, size, 2786 offset, size,
2765 PCI_DMA_TODEVICE); 2787 DMA_TO_DEVICE);
2766 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2788 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2767 goto dma_error; 2789 goto dma_error;
2768 buffer_info->next_to_watch = i; 2790 buffer_info->next_to_watch = i;
2769 2791
@@ -2975,8 +2997,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2975 /* fall through */ 2997 /* fall through */
2976 pull_size = min((unsigned int)4, skb->data_len); 2998 pull_size = min((unsigned int)4, skb->data_len);
2977 if (!__pskb_pull_tail(skb, pull_size)) { 2999 if (!__pskb_pull_tail(skb, pull_size)) {
2978 DPRINTK(DRV, ERR, 3000 e_err("__pskb_pull_tail failed.\n");
2979 "__pskb_pull_tail failed.\n");
2980 dev_kfree_skb_any(skb); 3001 dev_kfree_skb_any(skb);
2981 return NETDEV_TX_OK; 3002 return NETDEV_TX_OK;
2982 } 3003 }
@@ -3124,7 +3145,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3124 3145
3125 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3146 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3126 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3147 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3127 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3148 e_err("Invalid MTU setting\n");
3128 return -EINVAL; 3149 return -EINVAL;
3129 } 3150 }
3130 3151
@@ -3132,7 +3153,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3132 switch (hw->mac_type) { 3153 switch (hw->mac_type) {
3133 case e1000_undefined ... e1000_82542_rev2_1: 3154 case e1000_undefined ... e1000_82542_rev2_1:
3134 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3155 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3135 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 3156 e_err("Jumbo Frames not supported.\n");
3136 return -EINVAL; 3157 return -EINVAL;
3137 } 3158 }
3138 break; 3159 break;
@@ -3170,8 +3191,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3170 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3191 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3171 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3192 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3172 3193
3173 printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n", 3194 pr_info("%s changing MTU from %d to %d\n",
3174 netdev->name, netdev->mtu, new_mtu); 3195 netdev->name, netdev->mtu, new_mtu);
3175 netdev->mtu = new_mtu; 3196 netdev->mtu = new_mtu;
3176 3197
3177 if (netif_running(netdev)) 3198 if (netif_running(netdev))
@@ -3484,17 +3505,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3484 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3505 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3485 3506
3486 /* detected Tx unit hang */ 3507 /* detected Tx unit hang */
3487 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3508 e_err("Detected Tx Unit Hang\n"
3488 " Tx Queue <%lu>\n" 3509 " Tx Queue <%lu>\n"
3489 " TDH <%x>\n" 3510 " TDH <%x>\n"
3490 " TDT <%x>\n" 3511 " TDT <%x>\n"
3491 " next_to_use <%x>\n" 3512 " next_to_use <%x>\n"
3492 " next_to_clean <%x>\n" 3513 " next_to_clean <%x>\n"
3493 "buffer_info[next_to_clean]\n" 3514 "buffer_info[next_to_clean]\n"
3494 " time_stamp <%lx>\n" 3515 " time_stamp <%lx>\n"
3495 " next_to_watch <%x>\n" 3516 " next_to_watch <%x>\n"
3496 " jiffies <%lx>\n" 3517 " jiffies <%lx>\n"
3497 " next_to_watch.status <%x>\n", 3518 " next_to_watch.status <%x>\n",
3498 (unsigned long)((tx_ring - adapter->tx_ring) / 3519 (unsigned long)((tx_ring - adapter->tx_ring) /
3499 sizeof(struct e1000_tx_ring)), 3520 sizeof(struct e1000_tx_ring)),
3500 readl(hw->hw_addr + tx_ring->tdh), 3521 readl(hw->hw_addr + tx_ring->tdh),
@@ -3634,8 +3655,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3634 3655
3635 cleaned = true; 3656 cleaned = true;
3636 cleaned_count++; 3657 cleaned_count++;
3637 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, 3658 dma_unmap_page(&pdev->dev, buffer_info->dma,
3638 PCI_DMA_FROMDEVICE); 3659 buffer_info->length, DMA_FROM_DEVICE);
3639 buffer_info->dma = 0; 3660 buffer_info->dma = 0;
3640 3661
3641 length = le16_to_cpu(rx_desc->length); 3662 length = le16_to_cpu(rx_desc->length);
@@ -3733,7 +3754,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3733 3754
3734 /* eth type trans needs skb->data to point to something */ 3755 /* eth type trans needs skb->data to point to something */
3735 if (!pskb_may_pull(skb, ETH_HLEN)) { 3756 if (!pskb_may_pull(skb, ETH_HLEN)) {
3736 DPRINTK(DRV, ERR, "pskb_may_pull failed.\n"); 3757 e_err("pskb_may_pull failed.\n");
3737 dev_kfree_skb(skb); 3758 dev_kfree_skb(skb);
3738 goto next_desc; 3759 goto next_desc;
3739 } 3760 }
@@ -3817,8 +3838,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3817 3838
3818 cleaned = true; 3839 cleaned = true;
3819 cleaned_count++; 3840 cleaned_count++;
3820 pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, 3841 dma_unmap_single(&pdev->dev, buffer_info->dma,
3821 PCI_DMA_FROMDEVICE); 3842 buffer_info->length, DMA_FROM_DEVICE);
3822 buffer_info->dma = 0; 3843 buffer_info->dma = 0;
3823 3844
3824 length = le16_to_cpu(rx_desc->length); 3845 length = le16_to_cpu(rx_desc->length);
@@ -3833,8 +3854,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3833 3854
3834 if (adapter->discarding) { 3855 if (adapter->discarding) {
3835 /* All receives must fit into a single buffer */ 3856 /* All receives must fit into a single buffer */
3836 E1000_DBG("%s: Receive packet consumed multiple" 3857 e_info("Receive packet consumed multiple buffers\n");
3837 " buffers\n", netdev->name);
3838 /* recycle */ 3858 /* recycle */
3839 buffer_info->skb = skb; 3859 buffer_info->skb = skb;
3840 if (status & E1000_RXD_STAT_EOP) 3860 if (status & E1000_RXD_STAT_EOP)
@@ -3964,8 +3984,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3964 /* Fix for errata 23, can't cross 64kB boundary */ 3984 /* Fix for errata 23, can't cross 64kB boundary */
3965 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3985 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3966 struct sk_buff *oldskb = skb; 3986 struct sk_buff *oldskb = skb;
3967 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " 3987 e_err("skb align check failed: %u bytes at %p\n",
3968 "at %p\n", bufsz, skb->data); 3988 bufsz, skb->data);
3969 /* Try again, without freeing the previous */ 3989 /* Try again, without freeing the previous */
3970 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 3990 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3971 /* Failed allocation, critical failure */ 3991 /* Failed allocation, critical failure */
@@ -3998,11 +4018,11 @@ check_page:
3998 } 4018 }
3999 4019
4000 if (!buffer_info->dma) { 4020 if (!buffer_info->dma) {
4001 buffer_info->dma = pci_map_page(pdev, 4021 buffer_info->dma = dma_map_page(&pdev->dev,
4002 buffer_info->page, 0, 4022 buffer_info->page, 0,
4003 buffer_info->length, 4023 buffer_info->length,
4004 PCI_DMA_FROMDEVICE); 4024 DMA_FROM_DEVICE);
4005 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4025 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4006 put_page(buffer_info->page); 4026 put_page(buffer_info->page);
4007 dev_kfree_skb(skb); 4027 dev_kfree_skb(skb);
4008 buffer_info->page = NULL; 4028 buffer_info->page = NULL;
@@ -4073,8 +4093,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4073 /* Fix for errata 23, can't cross 64kB boundary */ 4093 /* Fix for errata 23, can't cross 64kB boundary */
4074 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4094 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4075 struct sk_buff *oldskb = skb; 4095 struct sk_buff *oldskb = skb;
4076 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4096 e_err("skb align check failed: %u bytes at %p\n",
4077 "at %p\n", bufsz, skb->data); 4097 bufsz, skb->data);
4078 /* Try again, without freeing the previous */ 4098 /* Try again, without freeing the previous */
4079 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4099 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4080 /* Failed allocation, critical failure */ 4100 /* Failed allocation, critical failure */
@@ -4098,11 +4118,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4098 buffer_info->skb = skb; 4118 buffer_info->skb = skb;
4099 buffer_info->length = adapter->rx_buffer_len; 4119 buffer_info->length = adapter->rx_buffer_len;
4100map_skb: 4120map_skb:
4101 buffer_info->dma = pci_map_single(pdev, 4121 buffer_info->dma = dma_map_single(&pdev->dev,
4102 skb->data, 4122 skb->data,
4103 buffer_info->length, 4123 buffer_info->length,
4104 PCI_DMA_FROMDEVICE); 4124 DMA_FROM_DEVICE);
4105 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4125 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4106 dev_kfree_skb(skb); 4126 dev_kfree_skb(skb);
4107 buffer_info->skb = NULL; 4127 buffer_info->skb = NULL;
4108 buffer_info->dma = 0; 4128 buffer_info->dma = 0;
@@ -4119,16 +4139,15 @@ map_skb:
4119 if (!e1000_check_64k_bound(adapter, 4139 if (!e1000_check_64k_bound(adapter,
4120 (void *)(unsigned long)buffer_info->dma, 4140 (void *)(unsigned long)buffer_info->dma,
4121 adapter->rx_buffer_len)) { 4141 adapter->rx_buffer_len)) {
4122 DPRINTK(RX_ERR, ERR, 4142 e_err("dma align check failed: %u bytes at %p\n",
4123 "dma align check failed: %u bytes at %p\n", 4143 adapter->rx_buffer_len,
4124 adapter->rx_buffer_len, 4144 (void *)(unsigned long)buffer_info->dma);
4125 (void *)(unsigned long)buffer_info->dma);
4126 dev_kfree_skb(skb); 4145 dev_kfree_skb(skb);
4127 buffer_info->skb = NULL; 4146 buffer_info->skb = NULL;
4128 4147
4129 pci_unmap_single(pdev, buffer_info->dma, 4148 dma_unmap_single(&pdev->dev, buffer_info->dma,
4130 adapter->rx_buffer_len, 4149 adapter->rx_buffer_len,
4131 PCI_DMA_FROMDEVICE); 4150 DMA_FROM_DEVICE);
4132 buffer_info->dma = 0; 4151 buffer_info->dma = 0;
4133 4152
4134 adapter->alloc_rx_buff_failed++; 4153 adapter->alloc_rx_buff_failed++;
@@ -4334,7 +4353,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
4334 int ret_val = pci_set_mwi(adapter->pdev); 4353 int ret_val = pci_set_mwi(adapter->pdev);
4335 4354
4336 if (ret_val) 4355 if (ret_val)
4337 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4356 e_err("Error in setting MWI\n");
4338} 4357}
4339 4358
4340void e1000_pci_clear_mwi(struct e1000_hw *hw) 4359void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4465,7 +4484,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4465 /* Fiber NICs only allow 1000 gbps Full duplex */ 4484 /* Fiber NICs only allow 1000 gbps Full duplex */
4466 if ((hw->media_type == e1000_media_type_fiber) && 4485 if ((hw->media_type == e1000_media_type_fiber) &&
4467 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4486 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4468 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4487 e_err("Unsupported Speed/Duplex configuration\n");
4469 return -EINVAL; 4488 return -EINVAL;
4470 } 4489 }
4471 4490
@@ -4488,7 +4507,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4488 break; 4507 break;
4489 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4508 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4490 default: 4509 default:
4491 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4510 e_err("Unsupported Speed/Duplex configuration\n");
4492 return -EINVAL; 4511 return -EINVAL;
4493 } 4512 }
4494 return 0; 4513 return 0;
@@ -4611,7 +4630,7 @@ static int e1000_resume(struct pci_dev *pdev)
4611 else 4630 else
4612 err = pci_enable_device_mem(pdev); 4631 err = pci_enable_device_mem(pdev);
4613 if (err) { 4632 if (err) {
4614 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); 4633 pr_err("Cannot enable PCI device from suspend\n");
4615 return err; 4634 return err;
4616 } 4635 }
4617 pci_set_master(pdev); 4636 pci_set_master(pdev);
@@ -4714,7 +4733,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4714 else 4733 else
4715 err = pci_enable_device_mem(pdev); 4734 err = pci_enable_device_mem(pdev);
4716 if (err) { 4735 if (err) {
4717 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); 4736 pr_err("Cannot re-enable PCI device after reset.\n");
4718 return PCI_ERS_RESULT_DISCONNECT; 4737 return PCI_ERS_RESULT_DISCONNECT;
4719 } 4738 }
4720 pci_set_master(pdev); 4739 pci_set_master(pdev);
@@ -4745,7 +4764,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4745 4764
4746 if (netif_running(netdev)) { 4765 if (netif_running(netdev)) {
4747 if (e1000_up(adapter)) { 4766 if (e1000_up(adapter)) {
4748 printk("e1000: can't bring device back up after reset\n"); 4767 pr_info("can't bring device back up after reset\n");
4749 return; 4768 return;
4750 } 4769 }
4751 } 4770 }
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d9298522f5ae..edd1c75aa895 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43 43
44#ifdef DBG
45#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
46#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
47#else
48#define DEBUGOUT(S)
49#define DEBUGOUT1(S, A...)
50#endif
51
52#define DEBUGFUNC(F) DEBUGOUT(F "\n")
53#define DEBUGOUT2 DEBUGOUT1
54#define DEBUGOUT3 DEBUGOUT2
55#define DEBUGOUT7 DEBUGOUT3
56
57
58#define er32(reg) \ 44#define er32(reg) \
59 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ 45 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
60 ? E1000_##reg : E1000_82542_##reg))) 46 ? E1000_##reg : E1000_82542_##reg)))
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741ccae9..9fbb562dc964 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -226,17 +226,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
226 case enable_option: 226 case enable_option:
227 switch (*value) { 227 switch (*value) {
228 case OPTION_ENABLED: 228 case OPTION_ENABLED:
229 DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); 229 e_dev_info("%s Enabled\n", opt->name);
230 return 0; 230 return 0;
231 case OPTION_DISABLED: 231 case OPTION_DISABLED:
232 DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); 232 e_dev_info("%s Disabled\n", opt->name);
233 return 0; 233 return 0;
234 } 234 }
235 break; 235 break;
236 case range_option: 236 case range_option:
237 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 237 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
238 DPRINTK(PROBE, INFO, 238 e_dev_info("%s set to %i\n", opt->name, *value);
239 "%s set to %i\n", opt->name, *value);
240 return 0; 239 return 0;
241 } 240 }
242 break; 241 break;
@@ -248,7 +247,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
248 ent = &opt->arg.l.p[i]; 247 ent = &opt->arg.l.p[i];
249 if (*value == ent->i) { 248 if (*value == ent->i) {
250 if (ent->str[0] != '\0') 249 if (ent->str[0] != '\0')
251 DPRINTK(PROBE, INFO, "%s\n", ent->str); 250 e_dev_info("%s\n", ent->str);
252 return 0; 251 return 0;
253 } 252 }
254 } 253 }
@@ -258,7 +257,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
258 BUG(); 257 BUG();
259 } 258 }
260 259
261 DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", 260 e_dev_info("Invalid %s value specified (%i) %s\n",
262 opt->name, *value, opt->err); 261 opt->name, *value, opt->err);
263 *value = opt->def; 262 *value = opt->def;
264 return -1; 263 return -1;
@@ -283,9 +282,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
283 int bd = adapter->bd_number; 282 int bd = adapter->bd_number;
284 283
285 if (bd >= E1000_MAX_NIC) { 284 if (bd >= E1000_MAX_NIC) {
286 DPRINTK(PROBE, NOTICE, 285 e_dev_warn("Warning: no configuration for board #%i "
287 "Warning: no configuration for board #%i\n", bd); 286 "using defaults for all values\n", bd);
288 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
289 } 287 }
290 288
291 { /* Transmit Descriptor Count */ 289 { /* Transmit Descriptor Count */
@@ -472,27 +470,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
472 adapter->itr = InterruptThrottleRate[bd]; 470 adapter->itr = InterruptThrottleRate[bd];
473 switch (adapter->itr) { 471 switch (adapter->itr) {
474 case 0: 472 case 0:
475 DPRINTK(PROBE, INFO, "%s turned off\n", 473 e_dev_info("%s turned off\n", opt.name);
476 opt.name);
477 break; 474 break;
478 case 1: 475 case 1:
479 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 476 e_dev_info("%s set to dynamic mode\n",
480 opt.name); 477 opt.name);
481 adapter->itr_setting = adapter->itr; 478 adapter->itr_setting = adapter->itr;
482 adapter->itr = 20000; 479 adapter->itr = 20000;
483 break; 480 break;
484 case 3: 481 case 3:
485 DPRINTK(PROBE, INFO, 482 e_dev_info("%s set to dynamic conservative "
486 "%s set to dynamic conservative mode\n", 483 "mode\n", opt.name);
487 opt.name);
488 adapter->itr_setting = adapter->itr; 484 adapter->itr_setting = adapter->itr;
489 adapter->itr = 20000; 485 adapter->itr = 20000;
490 break; 486 break;
487 case 4:
488 e_dev_info("%s set to simplified "
489 "(2000-8000) ints mode\n", opt.name);
490 adapter->itr_setting = adapter->itr;
491 break;
491 default: 492 default:
492 e1000_validate_option(&adapter->itr, &opt, 493 e1000_validate_option(&adapter->itr, &opt,
493 adapter); 494 adapter);
494 /* save the setting, because the dynamic bits change itr */ 495 /* save the setting, because the dynamic bits
495 /* clear the lower two bits because they are 496 * change itr.
497 * clear the lower two bits because they are
496 * used as control */ 498 * used as control */
497 adapter->itr_setting = adapter->itr & ~3; 499 adapter->itr_setting = adapter->itr & ~3;
498 break; 500 break;
@@ -543,19 +545,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
543{ 545{
544 int bd = adapter->bd_number; 546 int bd = adapter->bd_number;
545 if (num_Speed > bd) { 547 if (num_Speed > bd) {
546 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 548 e_dev_info("Speed not valid for fiber adapters, parameter "
547 "parameter ignored\n"); 549 "ignored\n");
548 } 550 }
549 551
550 if (num_Duplex > bd) { 552 if (num_Duplex > bd) {
551 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 553 e_dev_info("Duplex not valid for fiber adapters, parameter "
552 "parameter ignored\n"); 554 "ignored\n");
553 } 555 }
554 556
555 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 557 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
556 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 558 e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
557 "not valid for fiber adapters, " 559 "adapters, parameter ignored\n");
558 "parameter ignored\n");
559 } 560 }
560} 561}
561 562
@@ -619,9 +620,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
619 } 620 }
620 621
621 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 622 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
622 DPRINTK(PROBE, INFO, 623 e_dev_info("AutoNeg specified along with Speed or Duplex, "
623 "AutoNeg specified along with Speed or Duplex, " 624 "parameter ignored\n");
624 "parameter ignored\n");
625 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 625 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
626 } else { /* Autoneg */ 626 } else { /* Autoneg */
627 static const struct e1000_opt_list an_list[] = 627 static const struct e1000_opt_list an_list[] =
@@ -680,79 +680,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
680 case 0: 680 case 0:
681 adapter->hw.autoneg = adapter->fc_autoneg = 1; 681 adapter->hw.autoneg = adapter->fc_autoneg = 1;
682 if ((num_Speed > bd) && (speed != 0 || dplx != 0)) 682 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
683 DPRINTK(PROBE, INFO, 683 e_dev_info("Speed and duplex autonegotiation "
684 "Speed and duplex autonegotiation enabled\n"); 684 "enabled\n");
685 break; 685 break;
686 case HALF_DUPLEX: 686 case HALF_DUPLEX:
687 DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n"); 687 e_dev_info("Half Duplex specified without Speed\n");
688 DPRINTK(PROBE, INFO, "Using Autonegotiation at " 688 e_dev_info("Using Autonegotiation at Half Duplex only\n");
689 "Half Duplex only\n");
690 adapter->hw.autoneg = adapter->fc_autoneg = 1; 689 adapter->hw.autoneg = adapter->fc_autoneg = 1;
691 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 690 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
692 ADVERTISE_100_HALF; 691 ADVERTISE_100_HALF;
693 break; 692 break;
694 case FULL_DUPLEX: 693 case FULL_DUPLEX:
695 DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n"); 694 e_dev_info("Full Duplex specified without Speed\n");
696 DPRINTK(PROBE, INFO, "Using Autonegotiation at " 695 e_dev_info("Using Autonegotiation at Full Duplex only\n");
697 "Full Duplex only\n");
698 adapter->hw.autoneg = adapter->fc_autoneg = 1; 696 adapter->hw.autoneg = adapter->fc_autoneg = 1;
699 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | 697 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
700 ADVERTISE_100_FULL | 698 ADVERTISE_100_FULL |
701 ADVERTISE_1000_FULL; 699 ADVERTISE_1000_FULL;
702 break; 700 break;
703 case SPEED_10: 701 case SPEED_10:
704 DPRINTK(PROBE, INFO, "10 Mbps Speed specified " 702 e_dev_info("10 Mbps Speed specified without Duplex\n");
705 "without Duplex\n"); 703 e_dev_info("Using Autonegotiation at 10 Mbps only\n");
706 DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
707 adapter->hw.autoneg = adapter->fc_autoneg = 1; 704 adapter->hw.autoneg = adapter->fc_autoneg = 1;
708 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 705 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
709 ADVERTISE_10_FULL; 706 ADVERTISE_10_FULL;
710 break; 707 break;
711 case SPEED_10 + HALF_DUPLEX: 708 case SPEED_10 + HALF_DUPLEX:
712 DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n"); 709 e_dev_info("Forcing to 10 Mbps Half Duplex\n");
713 adapter->hw.autoneg = adapter->fc_autoneg = 0; 710 adapter->hw.autoneg = adapter->fc_autoneg = 0;
714 adapter->hw.forced_speed_duplex = e1000_10_half; 711 adapter->hw.forced_speed_duplex = e1000_10_half;
715 adapter->hw.autoneg_advertised = 0; 712 adapter->hw.autoneg_advertised = 0;
716 break; 713 break;
717 case SPEED_10 + FULL_DUPLEX: 714 case SPEED_10 + FULL_DUPLEX:
718 DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n"); 715 e_dev_info("Forcing to 10 Mbps Full Duplex\n");
719 adapter->hw.autoneg = adapter->fc_autoneg = 0; 716 adapter->hw.autoneg = adapter->fc_autoneg = 0;
720 adapter->hw.forced_speed_duplex = e1000_10_full; 717 adapter->hw.forced_speed_duplex = e1000_10_full;
721 adapter->hw.autoneg_advertised = 0; 718 adapter->hw.autoneg_advertised = 0;
722 break; 719 break;
723 case SPEED_100: 720 case SPEED_100:
724 DPRINTK(PROBE, INFO, "100 Mbps Speed specified " 721 e_dev_info("100 Mbps Speed specified without Duplex\n");
725 "without Duplex\n"); 722 e_dev_info("Using Autonegotiation at 100 Mbps only\n");
726 DPRINTK(PROBE, INFO, "Using Autonegotiation at "
727 "100 Mbps only\n");
728 adapter->hw.autoneg = adapter->fc_autoneg = 1; 723 adapter->hw.autoneg = adapter->fc_autoneg = 1;
729 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | 724 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
730 ADVERTISE_100_FULL; 725 ADVERTISE_100_FULL;
731 break; 726 break;
732 case SPEED_100 + HALF_DUPLEX: 727 case SPEED_100 + HALF_DUPLEX:
733 DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n"); 728 e_dev_info("Forcing to 100 Mbps Half Duplex\n");
734 adapter->hw.autoneg = adapter->fc_autoneg = 0; 729 adapter->hw.autoneg = adapter->fc_autoneg = 0;
735 adapter->hw.forced_speed_duplex = e1000_100_half; 730 adapter->hw.forced_speed_duplex = e1000_100_half;
736 adapter->hw.autoneg_advertised = 0; 731 adapter->hw.autoneg_advertised = 0;
737 break; 732 break;
738 case SPEED_100 + FULL_DUPLEX: 733 case SPEED_100 + FULL_DUPLEX:
739 DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n"); 734 e_dev_info("Forcing to 100 Mbps Full Duplex\n");
740 adapter->hw.autoneg = adapter->fc_autoneg = 0; 735 adapter->hw.autoneg = adapter->fc_autoneg = 0;
741 adapter->hw.forced_speed_duplex = e1000_100_full; 736 adapter->hw.forced_speed_duplex = e1000_100_full;
742 adapter->hw.autoneg_advertised = 0; 737 adapter->hw.autoneg_advertised = 0;
743 break; 738 break;
744 case SPEED_1000: 739 case SPEED_1000:
745 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " 740 e_dev_info("1000 Mbps Speed specified without Duplex\n");
746 "Duplex\n");
747 goto full_duplex_only; 741 goto full_duplex_only;
748 case SPEED_1000 + HALF_DUPLEX: 742 case SPEED_1000 + HALF_DUPLEX:
749 DPRINTK(PROBE, INFO, 743 e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
750 "Half Duplex is not supported at 1000 Mbps\n");
751 /* fall through */ 744 /* fall through */
752 case SPEED_1000 + FULL_DUPLEX: 745 case SPEED_1000 + FULL_DUPLEX:
753full_duplex_only: 746full_duplex_only:
754 DPRINTK(PROBE, INFO, 747 e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
755 "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 748 "only\n");
756 adapter->hw.autoneg = adapter->fc_autoneg = 1; 749 adapter->hw.autoneg = adapter->fc_autoneg = 1;
757 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; 750 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
758 break; 751 break;
@@ -762,9 +755,8 @@ full_duplex_only:
762 755
763 /* Speed, AutoNeg and MDI/MDI-X must all play nice */ 756 /* Speed, AutoNeg and MDI/MDI-X must all play nice */
764 if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { 757 if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
765 DPRINTK(PROBE, INFO, 758 e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
766 "Speed, AutoNeg and MDI-X specifications are " 759 "Setting MDI-X to a compatible value.\n");
767 "incompatible. Setting MDI-X to a compatible value.\n");
768 } 760 }
769} 761}
770 762
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 4b0016d69530..1e73eddee24a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
336 struct e1000_hw *hw = &adapter->hw; 336 struct e1000_hw *hw = &adapter->hw;
337 static int global_quad_port_a; /* global port a indication */ 337 static int global_quad_port_a; /* global port a indication */
338 struct pci_dev *pdev = adapter->pdev; 338 struct pci_dev *pdev = adapter->pdev;
339 u16 eeprom_data = 0;
340 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; 339 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
341 s32 rc; 340 s32 rc;
342 341
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
387 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) 386 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
388 adapter->flags &= ~FLAG_HAS_WOL; 387 adapter->flags &= ~FLAG_HAS_WOL;
389 break; 388 break;
390
391 case e1000_82573: 389 case e1000_82573:
390 case e1000_82574:
391 case e1000_82583:
392 /* Disable ASPM L0s due to hardware errata */
393 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
394
392 if (pdev->device == E1000_DEV_ID_82573L) { 395 if (pdev->device == E1000_DEV_ID_82573L) {
393 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, 396 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
394 &eeprom_data) < 0) 397 adapter->max_hw_frame_size = DEFAULT_JUMBO;
395 break;
396 if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
397 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
398 adapter->max_hw_frame_size = DEFAULT_JUMBO;
399 }
400 } 398 }
401 break; 399 break;
402 default: 400 default:
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
1792 | FLAG_RESET_OVERWRITES_LAA /* errata */ 1790 | FLAG_RESET_OVERWRITES_LAA /* errata */
1793 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1791 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1794 | FLAG_APME_CHECK_PORT_B, 1792 | FLAG_APME_CHECK_PORT_B,
1793 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1795 .pba = 38, 1794 .pba = 38,
1796 .max_hw_frame_size = DEFAULT_JUMBO, 1795 .max_hw_frame_size = DEFAULT_JUMBO,
1797 .get_variants = e1000_get_variants_82571, 1796 .get_variants = e1000_get_variants_82571,
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
1809 | FLAG_RX_CSUM_ENABLED 1808 | FLAG_RX_CSUM_ENABLED
1810 | FLAG_HAS_CTRLEXT_ON_LOAD 1809 | FLAG_HAS_CTRLEXT_ON_LOAD
1811 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1810 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1811 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1812 .pba = 38, 1812 .pba = 38,
1813 .max_hw_frame_size = DEFAULT_JUMBO, 1813 .max_hw_frame_size = DEFAULT_JUMBO,
1814 .get_variants = e1000_get_variants_82571, 1814 .get_variants = e1000_get_variants_82571,
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
1820struct e1000_info e1000_82573_info = { 1820struct e1000_info e1000_82573_info = {
1821 .mac = e1000_82573, 1821 .mac = e1000_82573,
1822 .flags = FLAG_HAS_HW_VLAN_FILTER 1822 .flags = FLAG_HAS_HW_VLAN_FILTER
1823 | FLAG_HAS_JUMBO_FRAMES
1824 | FLAG_HAS_WOL 1823 | FLAG_HAS_WOL
1825 | FLAG_APME_IN_CTRL3 1824 | FLAG_APME_IN_CTRL3
1826 | FLAG_RX_CSUM_ENABLED 1825 | FLAG_RX_CSUM_ENABLED
1827 | FLAG_HAS_SMART_POWER_DOWN 1826 | FLAG_HAS_SMART_POWER_DOWN
1828 | FLAG_HAS_AMT 1827 | FLAG_HAS_AMT
1829 | FLAG_HAS_ERT
1830 | FLAG_HAS_SWSM_ON_LOAD, 1828 | FLAG_HAS_SWSM_ON_LOAD,
1831 .pba = 20, 1829 .pba = 20,
1832 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1830 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
@@ -1847,7 +1845,7 @@ struct e1000_info e1000_82574_info = {
1847 | FLAG_HAS_SMART_POWER_DOWN 1845 | FLAG_HAS_SMART_POWER_DOWN
1848 | FLAG_HAS_AMT 1846 | FLAG_HAS_AMT
1849 | FLAG_HAS_CTRLEXT_ON_LOAD, 1847 | FLAG_HAS_CTRLEXT_ON_LOAD,
1850 .pba = 20, 1848 .pba = 36,
1851 .max_hw_frame_size = DEFAULT_JUMBO, 1849 .max_hw_frame_size = DEFAULT_JUMBO,
1852 .get_variants = e1000_get_variants_82571, 1850 .get_variants = e1000_get_variants_82571,
1853 .mac_ops = &e82571_mac_ops, 1851 .mac_ops = &e82571_mac_ops,
@@ -1864,7 +1862,7 @@ struct e1000_info e1000_82583_info = {
1864 | FLAG_HAS_SMART_POWER_DOWN 1862 | FLAG_HAS_SMART_POWER_DOWN
1865 | FLAG_HAS_AMT 1863 | FLAG_HAS_AMT
1866 | FLAG_HAS_CTRLEXT_ON_LOAD, 1864 | FLAG_HAS_CTRLEXT_ON_LOAD,
1867 .pba = 20, 1865 .pba = 36,
1868 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1866 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1869 .get_variants = e1000_get_variants_82571, 1867 .get_variants = e1000_get_variants_82571,
1870 .mac_ops = &e82571_mac_ops, 1868 .mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d6897..7f760aa9efe5 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -214,6 +214,8 @@
214#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ 214#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
215#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 215#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
216#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 216#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
217#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
218#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 219#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 220#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
219#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 221#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 12648a1cdb78..c0b3db40bd73 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -37,6 +37,7 @@
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/pci-aspm.h>
40 41
41#include "hw.h" 42#include "hw.h"
42 43
@@ -188,6 +189,8 @@ struct e1000_buffer {
188 unsigned long time_stamp; 189 unsigned long time_stamp;
189 u16 length; 190 u16 length;
190 u16 next_to_watch; 191 u16 next_to_watch;
192 unsigned int segs;
193 unsigned int bytecount;
191 u16 mapped_as_page; 194 u16 mapped_as_page;
192 }; 195 };
193 /* Rx */ 196 /* Rx */
@@ -370,7 +373,7 @@ struct e1000_adapter {
370struct e1000_info { 373struct e1000_info {
371 enum e1000_mac_type mac; 374 enum e1000_mac_type mac;
372 unsigned int flags; 375 unsigned int flags;
373 unsigned int flags2; 376 unsigned int flags2;
374 u32 pba; 377 u32 pba;
375 u32 max_hw_frame_size; 378 u32 max_hw_frame_size;
376 s32 (*get_variants)(struct e1000_adapter *); 379 s32 (*get_variants)(struct e1000_adapter *);
@@ -417,6 +420,7 @@ struct e1000_info {
417#define FLAG2_CRC_STRIPPING (1 << 0) 420#define FLAG2_CRC_STRIPPING (1 << 0)
418#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 421#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
419#define FLAG2_IS_DISCARDING (1 << 2) 422#define FLAG2_IS_DISCARDING (1 << 2)
423#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
420 424
421#define E1000_RX_DESC_PS(R, i) \ 425#define E1000_RX_DESC_PS(R, i) \
422 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -457,6 +461,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
457extern bool e1000e_has_link(struct e1000_adapter *adapter); 461extern bool e1000e_has_link(struct e1000_adapter *adapter);
458extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 462extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
459extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 463extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
464extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
460 465
461extern unsigned int copybreak; 466extern unsigned int copybreak;
462 467
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330c..6ff376cfe139 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
412 netdev->features &= ~NETIF_F_TSO6; 412 netdev->features &= ~NETIF_F_TSO6;
413 } 413 }
414 414
415 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
416 adapter->flags |= FLAG_TSO_FORCE; 415 adapter->flags |= FLAG_TSO_FORCE;
417 return 0; 416 return 0;
418} 417}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1069 if (tx_ring->desc && tx_ring->buffer_info) { 1068 if (tx_ring->desc && tx_ring->buffer_info) {
1070 for (i = 0; i < tx_ring->count; i++) { 1069 for (i = 0; i < tx_ring->count; i++) {
1071 if (tx_ring->buffer_info[i].dma) 1070 if (tx_ring->buffer_info[i].dma)
1072 pci_unmap_single(pdev, 1071 dma_unmap_single(&pdev->dev,
1073 tx_ring->buffer_info[i].dma, 1072 tx_ring->buffer_info[i].dma,
1074 tx_ring->buffer_info[i].length, 1073 tx_ring->buffer_info[i].length,
1075 PCI_DMA_TODEVICE); 1074 DMA_TO_DEVICE);
1076 if (tx_ring->buffer_info[i].skb) 1075 if (tx_ring->buffer_info[i].skb)
1077 dev_kfree_skb(tx_ring->buffer_info[i].skb); 1076 dev_kfree_skb(tx_ring->buffer_info[i].skb);
1078 } 1077 }
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1081 if (rx_ring->desc && rx_ring->buffer_info) { 1080 if (rx_ring->desc && rx_ring->buffer_info) {
1082 for (i = 0; i < rx_ring->count; i++) { 1081 for (i = 0; i < rx_ring->count; i++) {
1083 if (rx_ring->buffer_info[i].dma) 1082 if (rx_ring->buffer_info[i].dma)
1084 pci_unmap_single(pdev, 1083 dma_unmap_single(&pdev->dev,
1085 rx_ring->buffer_info[i].dma, 1084 rx_ring->buffer_info[i].dma,
1086 2048, PCI_DMA_FROMDEVICE); 1085 2048, DMA_FROM_DEVICE);
1087 if (rx_ring->buffer_info[i].skb) 1086 if (rx_ring->buffer_info[i].skb)
1088 dev_kfree_skb(rx_ring->buffer_info[i].skb); 1087 dev_kfree_skb(rx_ring->buffer_info[i].skb);
1089 } 1088 }
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1163 tx_ring->buffer_info[i].skb = skb; 1162 tx_ring->buffer_info[i].skb = skb;
1164 tx_ring->buffer_info[i].length = skb->len; 1163 tx_ring->buffer_info[i].length = skb->len;
1165 tx_ring->buffer_info[i].dma = 1164 tx_ring->buffer_info[i].dma =
1166 pci_map_single(pdev, skb->data, skb->len, 1165 dma_map_single(&pdev->dev, skb->data, skb->len,
1167 PCI_DMA_TODEVICE); 1166 DMA_TO_DEVICE);
1168 if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { 1167 if (dma_mapping_error(&pdev->dev,
1168 tx_ring->buffer_info[i].dma)) {
1169 ret_val = 4; 1169 ret_val = 4;
1170 goto err_nomem; 1170 goto err_nomem;
1171 } 1171 }
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1226 skb_reserve(skb, NET_IP_ALIGN); 1226 skb_reserve(skb, NET_IP_ALIGN);
1227 rx_ring->buffer_info[i].skb = skb; 1227 rx_ring->buffer_info[i].skb = skb;
1228 rx_ring->buffer_info[i].dma = 1228 rx_ring->buffer_info[i].dma =
1229 pci_map_single(pdev, skb->data, 2048, 1229 dma_map_single(&pdev->dev, skb->data, 2048,
1230 PCI_DMA_FROMDEVICE); 1230 DMA_FROM_DEVICE);
1231 if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { 1231 if (dma_mapping_error(&pdev->dev,
1232 rx_ring->buffer_info[i].dma)) {
1232 ret_val = 8; 1233 ret_val = 8;
1233 goto err_nomem; 1234 goto err_nomem;
1234 } 1235 }
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1556 for (i = 0; i < 64; i++) { /* send the packets */ 1557 for (i = 0; i < 64; i++) { /* send the packets */
1557 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1558 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1558 1024); 1559 1024);
1559 pci_dma_sync_single_for_device(pdev, 1560 dma_sync_single_for_device(&pdev->dev,
1560 tx_ring->buffer_info[k].dma, 1561 tx_ring->buffer_info[k].dma,
1561 tx_ring->buffer_info[k].length, 1562 tx_ring->buffer_info[k].length,
1562 PCI_DMA_TODEVICE); 1563 DMA_TO_DEVICE);
1563 k++; 1564 k++;
1564 if (k == tx_ring->count) 1565 if (k == tx_ring->count)
1565 k = 0; 1566 k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1569 time = jiffies; /* set the start time for the receive */ 1570 time = jiffies; /* set the start time for the receive */
1570 good_cnt = 0; 1571 good_cnt = 0;
1571 do { /* receive the sent packets */ 1572 do { /* receive the sent packets */
1572 pci_dma_sync_single_for_cpu(pdev, 1573 dma_sync_single_for_cpu(&pdev->dev,
1573 rx_ring->buffer_info[l].dma, 2048, 1574 rx_ring->buffer_info[l].dma, 2048,
1574 PCI_DMA_FROMDEVICE); 1575 DMA_FROM_DEVICE);
1575 1576
1576 ret_val = e1000_check_lbtest_frame( 1577 ret_val = e1000_check_lbtest_frame(
1577 rx_ring->buffer_info[l].skb, 1024); 1578 rx_ring->buffer_info[l].skb, 1024);
@@ -1889,7 +1890,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
1889{ 1890{
1890 struct e1000_adapter *adapter = netdev_priv(netdev); 1891 struct e1000_adapter *adapter = netdev_priv(netdev);
1891 1892
1892 if (adapter->itr_setting <= 3) 1893 if (adapter->itr_setting <= 4)
1893 ec->rx_coalesce_usecs = adapter->itr_setting; 1894 ec->rx_coalesce_usecs = adapter->itr_setting;
1894 else 1895 else
1895 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; 1896 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1905,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
1904 struct e1000_hw *hw = &adapter->hw; 1905 struct e1000_hw *hw = &adapter->hw;
1905 1906
1906 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || 1907 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1907 ((ec->rx_coalesce_usecs > 3) && 1908 ((ec->rx_coalesce_usecs > 4) &&
1908 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || 1909 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1909 (ec->rx_coalesce_usecs == 2)) 1910 (ec->rx_coalesce_usecs == 2))
1910 return -EINVAL; 1911 return -EINVAL;
1911 1912
1912 if (ec->rx_coalesce_usecs <= 3) { 1913 if (ec->rx_coalesce_usecs == 4) {
1914 adapter->itr = adapter->itr_setting = 4;
1915 } else if (ec->rx_coalesce_usecs <= 3) {
1913 adapter->itr = 20000; 1916 adapter->itr = 20000;
1914 adapter->itr_setting = ec->rx_coalesce_usecs; 1917 adapter->itr_setting = ec->rx_coalesce_usecs;
1915 } else { 1918 } else {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 5059c22155d9..b8c4dce01a04 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
83 83
84 84
85#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ 85#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
86/* FW established a valid mode */
87#define E1000_ICH_FWSM_FW_VALID 0x00008000
86 88
87#define E1000_ICH_MNG_IAMT_MODE 0x2 89#define E1000_ICH_MNG_IAMT_MODE 0x2
88 90
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
259static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 261static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
260{ 262{
261 struct e1000_phy_info *phy = &hw->phy; 263 struct e1000_phy_info *phy = &hw->phy;
264 u32 ctrl;
262 s32 ret_val = 0; 265 s32 ret_val = 0;
263 266
264 phy->addr = 1; 267 phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
274 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 277 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
275 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 278 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
276 279
280 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
281 /*
282 * The MAC-PHY interconnect may still be in SMBus mode
283 * after Sx->S0. Toggle the LANPHYPC Value bit to force
284 * the interconnect to PCIe mode, but only if there is no
285 * firmware present otherwise firmware will have done it.
286 */
287 ctrl = er32(CTRL);
288 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
289 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
290 ew32(CTRL, ctrl);
291 udelay(10);
292 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293 ew32(CTRL, ctrl);
294 msleep(50);
295 }
296
297 /*
298 * Reset the PHY before any acccess to it. Doing so, ensures that
299 * the PHY is in a known good state before we read/write PHY registers.
300 * The generic reset is sufficient here, because we haven't determined
301 * the PHY type yet.
302 */
303 ret_val = e1000e_phy_hw_reset_generic(hw);
304 if (ret_val)
305 goto out;
306
277 phy->id = e1000_phy_unknown; 307 phy->id = e1000_phy_unknown;
278 ret_val = e1000e_get_phy_id(hw); 308 ret_val = e1000e_get_phy_id(hw);
279 if (ret_val) 309 if (ret_val)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 5f70c437fa41..c5f65a29865a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -52,7 +52,7 @@
52 52
53#include "e1000.h" 53#include "e1000.h"
54 54
55#define DRV_VERSION "1.0.2-k2" 55#define DRV_VERSION "1.0.2-k4"
56char e1000e_driver_name[] = "e1000e"; 56char e1000e_driver_name[] = "e1000e";
57const char e1000e_driver_version[] = DRV_VERSION; 57const char e1000e_driver_version[] = DRV_VERSION;
58 58
@@ -69,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
69 [board_pchlan] = &e1000_pch_info, 69 [board_pchlan] = &e1000_pch_info,
70}; 70};
71 71
72struct e1000_reg_info {
73 u32 ofs;
74 char *name;
75};
76
77#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
78#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
79#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
80#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
81#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
82
83#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
84#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
85#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
86#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
87#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
88
89static const struct e1000_reg_info e1000_reg_info_tbl[] = {
90
91 /* General Registers */
92 {E1000_CTRL, "CTRL"},
93 {E1000_STATUS, "STATUS"},
94 {E1000_CTRL_EXT, "CTRL_EXT"},
95
96 /* Interrupt Registers */
97 {E1000_ICR, "ICR"},
98
99 /* RX Registers */
100 {E1000_RCTL, "RCTL"},
101 {E1000_RDLEN, "RDLEN"},
102 {E1000_RDH, "RDH"},
103 {E1000_RDT, "RDT"},
104 {E1000_RDTR, "RDTR"},
105 {E1000_RXDCTL(0), "RXDCTL"},
106 {E1000_ERT, "ERT"},
107 {E1000_RDBAL, "RDBAL"},
108 {E1000_RDBAH, "RDBAH"},
109 {E1000_RDFH, "RDFH"},
110 {E1000_RDFT, "RDFT"},
111 {E1000_RDFHS, "RDFHS"},
112 {E1000_RDFTS, "RDFTS"},
113 {E1000_RDFPC, "RDFPC"},
114
115 /* TX Registers */
116 {E1000_TCTL, "TCTL"},
117 {E1000_TDBAL, "TDBAL"},
118 {E1000_TDBAH, "TDBAH"},
119 {E1000_TDLEN, "TDLEN"},
120 {E1000_TDH, "TDH"},
121 {E1000_TDT, "TDT"},
122 {E1000_TIDV, "TIDV"},
123 {E1000_TXDCTL(0), "TXDCTL"},
124 {E1000_TADV, "TADV"},
125 {E1000_TARC(0), "TARC"},
126 {E1000_TDFH, "TDFH"},
127 {E1000_TDFT, "TDFT"},
128 {E1000_TDFHS, "TDFHS"},
129 {E1000_TDFTS, "TDFTS"},
130 {E1000_TDFPC, "TDFPC"},
131
132 /* List Terminator */
133 {}
134};
135
136/*
137 * e1000_regdump - register printout routine
138 */
139static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
140{
141 int n = 0;
142 char rname[16];
143 u32 regs[8];
144
145 switch (reginfo->ofs) {
146 case E1000_RXDCTL(0):
147 for (n = 0; n < 2; n++)
148 regs[n] = __er32(hw, E1000_RXDCTL(n));
149 break;
150 case E1000_TXDCTL(0):
151 for (n = 0; n < 2; n++)
152 regs[n] = __er32(hw, E1000_TXDCTL(n));
153 break;
154 case E1000_TARC(0):
155 for (n = 0; n < 2; n++)
156 regs[n] = __er32(hw, E1000_TARC(n));
157 break;
158 default:
159 printk(KERN_INFO "%-15s %08x\n",
160 reginfo->name, __er32(hw, reginfo->ofs));
161 return;
162 }
163
164 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
165 printk(KERN_INFO "%-15s ", rname);
166 for (n = 0; n < 2; n++)
167 printk(KERN_CONT "%08x ", regs[n]);
168 printk(KERN_CONT "\n");
169}
170
171
172/*
173 * e1000e_dump - Print registers, tx-ring and rx-ring
174 */
175static void e1000e_dump(struct e1000_adapter *adapter)
176{
177 struct net_device *netdev = adapter->netdev;
178 struct e1000_hw *hw = &adapter->hw;
179 struct e1000_reg_info *reginfo;
180 struct e1000_ring *tx_ring = adapter->tx_ring;
181 struct e1000_tx_desc *tx_desc;
182 struct my_u0 { u64 a; u64 b; } *u0;
183 struct e1000_buffer *buffer_info;
184 struct e1000_ring *rx_ring = adapter->rx_ring;
185 union e1000_rx_desc_packet_split *rx_desc_ps;
186 struct e1000_rx_desc *rx_desc;
187 struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
188 u32 staterr;
189 int i = 0;
190
191 if (!netif_msg_hw(adapter))
192 return;
193
194 /* Print netdevice Info */
195 if (netdev) {
196 dev_info(&adapter->pdev->dev, "Net device Info\n");
197 printk(KERN_INFO "Device Name state "
198 "trans_start last_rx\n");
199 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
200 netdev->name,
201 netdev->state,
202 netdev->trans_start,
203 netdev->last_rx);
204 }
205
206 /* Print Registers */
207 dev_info(&adapter->pdev->dev, "Register Dump\n");
208 printk(KERN_INFO " Register Name Value\n");
209 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
210 reginfo->name; reginfo++) {
211 e1000_regdump(hw, reginfo);
212 }
213
214 /* Print TX Ring Summary */
215 if (!netdev || !netif_running(netdev))
216 goto exit;
217
218 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
219 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
220 " leng ntw timestamp\n");
221 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
222 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
223 0, tx_ring->next_to_use, tx_ring->next_to_clean,
224 (u64)buffer_info->dma,
225 buffer_info->length,
226 buffer_info->next_to_watch,
227 (u64)buffer_info->time_stamp);
228
229 /* Print TX Rings */
230 if (!netif_msg_tx_done(adapter))
231 goto rx_ring_summary;
232
233 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
234
235 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
236 *
237 * Legacy Transmit Descriptor
238 * +--------------------------------------------------------------+
239 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
240 * +--------------------------------------------------------------+
241 * 8 | Special | CSS | Status | CMD | CSO | Length |
242 * +--------------------------------------------------------------+
243 * 63 48 47 36 35 32 31 24 23 16 15 0
244 *
245 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
246 * 63 48 47 40 39 32 31 16 15 8 7 0
247 * +----------------------------------------------------------------+
248 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
249 * +----------------------------------------------------------------+
250 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
251 * +----------------------------------------------------------------+
252 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
253 *
254 * Extended Data Descriptor (DTYP=0x1)
255 * +----------------------------------------------------------------+
256 * 0 | Buffer Address [63:0] |
257 * +----------------------------------------------------------------+
258 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
259 * +----------------------------------------------------------------+
260 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
261 */
262 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
263 " [bi->dma ] leng ntw timestamp bi->skb "
264 "<-- Legacy format\n");
265 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
266 " [bi->dma ] leng ntw timestamp bi->skb "
267 "<-- Ext Context format\n");
268 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
269 " [bi->dma ] leng ntw timestamp bi->skb "
270 "<-- Ext Data format\n");
271 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
272 tx_desc = E1000_TX_DESC(*tx_ring, i);
273 buffer_info = &tx_ring->buffer_info[i];
274 u0 = (struct my_u0 *)tx_desc;
275 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
276 "%04X %3X %016llX %p",
277 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
278 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
279 le64_to_cpu(u0->a), le64_to_cpu(u0->b),
280 (u64)buffer_info->dma, buffer_info->length,
281 buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
282 buffer_info->skb);
283 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
284 printk(KERN_CONT " NTC/U\n");
285 else if (i == tx_ring->next_to_use)
286 printk(KERN_CONT " NTU\n");
287 else if (i == tx_ring->next_to_clean)
288 printk(KERN_CONT " NTC\n");
289 else
290 printk(KERN_CONT "\n");
291
292 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
293 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
294 16, 1, phys_to_virt(buffer_info->dma),
295 buffer_info->length, true);
296 }
297
298 /* Print RX Rings Summary */
299rx_ring_summary:
300 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
301 printk(KERN_INFO "Queue [NTU] [NTC]\n");
302 printk(KERN_INFO " %5d %5X %5X\n", 0,
303 rx_ring->next_to_use, rx_ring->next_to_clean);
304
305 /* Print RX Rings */
306 if (!netif_msg_rx_status(adapter))
307 goto exit;
308
309 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
310 switch (adapter->rx_ps_pages) {
311 case 1:
312 case 2:
313 case 3:
314 /* [Extended] Packet Split Receive Descriptor Format
315 *
316 * +-----------------------------------------------------+
317 * 0 | Buffer Address 0 [63:0] |
318 * +-----------------------------------------------------+
319 * 8 | Buffer Address 1 [63:0] |
320 * +-----------------------------------------------------+
321 * 16 | Buffer Address 2 [63:0] |
322 * +-----------------------------------------------------+
323 * 24 | Buffer Address 3 [63:0] |
324 * +-----------------------------------------------------+
325 */
326 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
327 "[buffer 1 63:0 ] "
328 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
329 "[bi->skb] <-- Ext Pkt Split format\n");
330 /* [Extended] Receive Descriptor (Write-Back) Format
331 *
332 * 63 48 47 32 31 13 12 8 7 4 3 0
333 * +------------------------------------------------------+
334 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
335 * | Checksum | Ident | | Queue | | Type |
336 * +------------------------------------------------------+
337 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
338 * +------------------------------------------------------+
339 * 63 48 47 32 31 20 19 0
340 */
341 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
342 "[vl l0 ee es] "
343 "[ l3 l2 l1 hs] [reserved ] ---------------- "
344 "[bi->skb] <-- Ext Rx Write-Back format\n");
345 for (i = 0; i < rx_ring->count; i++) {
346 buffer_info = &rx_ring->buffer_info[i];
347 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
348 u1 = (struct my_u1 *)rx_desc_ps;
349 staterr =
350 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
351 if (staterr & E1000_RXD_STAT_DD) {
352 /* Descriptor Done */
353 printk(KERN_INFO "RWB[0x%03X] %016llX "
354 "%016llX %016llX %016llX "
355 "---------------- %p", i,
356 le64_to_cpu(u1->a),
357 le64_to_cpu(u1->b),
358 le64_to_cpu(u1->c),
359 le64_to_cpu(u1->d),
360 buffer_info->skb);
361 } else {
362 printk(KERN_INFO "R [0x%03X] %016llX "
363 "%016llX %016llX %016llX %016llX %p", i,
364 le64_to_cpu(u1->a),
365 le64_to_cpu(u1->b),
366 le64_to_cpu(u1->c),
367 le64_to_cpu(u1->d),
368 (u64)buffer_info->dma,
369 buffer_info->skb);
370
371 if (netif_msg_pktdata(adapter))
372 print_hex_dump(KERN_INFO, "",
373 DUMP_PREFIX_ADDRESS, 16, 1,
374 phys_to_virt(buffer_info->dma),
375 adapter->rx_ps_bsize0, true);
376 }
377
378 if (i == rx_ring->next_to_use)
379 printk(KERN_CONT " NTU\n");
380 else if (i == rx_ring->next_to_clean)
381 printk(KERN_CONT " NTC\n");
382 else
383 printk(KERN_CONT "\n");
384 }
385 break;
386 default:
387 case 0:
388 /* Legacy Receive Descriptor Format
389 *
390 * +-----------------------------------------------------+
391 * | Buffer Address [63:0] |
392 * +-----------------------------------------------------+
393 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
394 * +-----------------------------------------------------+
395 * 63 48 47 40 39 32 31 16 15 0
396 */
397 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
398 "[vl er S cks ln] [bi->dma ] [bi->skb] "
399 "<-- Legacy format\n");
400 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
401 rx_desc = E1000_RX_DESC(*rx_ring, i);
402 buffer_info = &rx_ring->buffer_info[i];
403 u0 = (struct my_u0 *)rx_desc;
404 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
405 "%016llX %p",
406 i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
407 (u64)buffer_info->dma, buffer_info->skb);
408 if (i == rx_ring->next_to_use)
409 printk(KERN_CONT " NTU\n");
410 else if (i == rx_ring->next_to_clean)
411 printk(KERN_CONT " NTC\n");
412 else
413 printk(KERN_CONT "\n");
414
415 if (netif_msg_pktdata(adapter))
416 print_hex_dump(KERN_INFO, "",
417 DUMP_PREFIX_ADDRESS,
418 16, 1, phys_to_virt(buffer_info->dma),
419 adapter->rx_buffer_len, true);
420 }
421 }
422
423exit:
424 return;
425}
426
72/** 427/**
73 * e1000_desc_unused - calculate if we have unused descriptors 428 * e1000_desc_unused - calculate if we have unused descriptors
74 **/ 429 **/
@@ -181,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
181 536
182 buffer_info->skb = skb; 537 buffer_info->skb = skb;
183map_skb: 538map_skb:
184 buffer_info->dma = pci_map_single(pdev, skb->data, 539 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
185 adapter->rx_buffer_len, 540 adapter->rx_buffer_len,
186 PCI_DMA_FROMDEVICE); 541 DMA_FROM_DEVICE);
187 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 542 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
188 dev_err(&pdev->dev, "RX DMA map failed\n"); 543 dev_err(&pdev->dev, "RX DMA map failed\n");
189 adapter->rx_dma_failed++; 544 adapter->rx_dma_failed++;
190 break; 545 break;
@@ -193,26 +548,23 @@ map_skb:
193 rx_desc = E1000_RX_DESC(*rx_ring, i); 548 rx_desc = E1000_RX_DESC(*rx_ring, i);
194 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 549 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
195 550
551 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
552 /*
553 * Force memory writes to complete before letting h/w
554 * know there are new descriptors to fetch. (Only
555 * applicable for weak-ordered memory model archs,
556 * such as IA-64).
557 */
558 wmb();
559 writel(i, adapter->hw.hw_addr + rx_ring->tail);
560 }
196 i++; 561 i++;
197 if (i == rx_ring->count) 562 if (i == rx_ring->count)
198 i = 0; 563 i = 0;
199 buffer_info = &rx_ring->buffer_info[i]; 564 buffer_info = &rx_ring->buffer_info[i];
200 } 565 }
201 566
202 if (rx_ring->next_to_use != i) { 567 rx_ring->next_to_use = i;
203 rx_ring->next_to_use = i;
204 if (i-- == 0)
205 i = (rx_ring->count - 1);
206
207 /*
208 * Force memory writes to complete before letting h/w
209 * know there are new descriptors to fetch. (Only
210 * applicable for weak-ordered memory model archs,
211 * such as IA-64).
212 */
213 wmb();
214 writel(i, adapter->hw.hw_addr + rx_ring->tail);
215 }
216} 568}
217 569
218/** 570/**
@@ -250,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
250 adapter->alloc_rx_buff_failed++; 602 adapter->alloc_rx_buff_failed++;
251 goto no_buffers; 603 goto no_buffers;
252 } 604 }
253 ps_page->dma = pci_map_page(pdev, 605 ps_page->dma = dma_map_page(&pdev->dev,
254 ps_page->page, 606 ps_page->page,
255 0, PAGE_SIZE, 607 0, PAGE_SIZE,
256 PCI_DMA_FROMDEVICE); 608 DMA_FROM_DEVICE);
257 if (pci_dma_mapping_error(pdev, ps_page->dma)) { 609 if (dma_mapping_error(&pdev->dev,
610 ps_page->dma)) {
258 dev_err(&adapter->pdev->dev, 611 dev_err(&adapter->pdev->dev,
259 "RX DMA page map failed\n"); 612 "RX DMA page map failed\n");
260 adapter->rx_dma_failed++; 613 adapter->rx_dma_failed++;
@@ -279,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
279 } 632 }
280 633
281 buffer_info->skb = skb; 634 buffer_info->skb = skb;
282 buffer_info->dma = pci_map_single(pdev, skb->data, 635 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
283 adapter->rx_ps_bsize0, 636 adapter->rx_ps_bsize0,
284 PCI_DMA_FROMDEVICE); 637 DMA_FROM_DEVICE);
285 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 638 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
286 dev_err(&pdev->dev, "RX DMA map failed\n"); 639 dev_err(&pdev->dev, "RX DMA map failed\n");
287 adapter->rx_dma_failed++; 640 adapter->rx_dma_failed++;
288 /* cleanup skb */ 641 /* cleanup skb */
@@ -293,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
293 646
294 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 647 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
295 648
649 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
650 /*
651 * Force memory writes to complete before letting h/w
652 * know there are new descriptors to fetch. (Only
653 * applicable for weak-ordered memory model archs,
654 * such as IA-64).
655 */
656 wmb();
657 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
658 }
659
296 i++; 660 i++;
297 if (i == rx_ring->count) 661 if (i == rx_ring->count)
298 i = 0; 662 i = 0;
@@ -300,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
300 } 664 }
301 665
302no_buffers: 666no_buffers:
303 if (rx_ring->next_to_use != i) { 667 rx_ring->next_to_use = i;
304 rx_ring->next_to_use = i;
305
306 if (!(i--))
307 i = (rx_ring->count - 1);
308
309 /*
310 * Force memory writes to complete before letting h/w
311 * know there are new descriptors to fetch. (Only
312 * applicable for weak-ordered memory model archs,
313 * such as IA-64).
314 */
315 wmb();
316 /*
317 * Hardware increments by 16 bytes, but packet split
318 * descriptors are 32 bytes...so we increment tail
319 * twice as much.
320 */
321 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
322 }
323} 668}
324 669
325/** 670/**
@@ -369,10 +714,10 @@ check_page:
369 } 714 }
370 715
371 if (!buffer_info->dma) 716 if (!buffer_info->dma)
372 buffer_info->dma = pci_map_page(pdev, 717 buffer_info->dma = dma_map_page(&pdev->dev,
373 buffer_info->page, 0, 718 buffer_info->page, 0,
374 PAGE_SIZE, 719 PAGE_SIZE,
375 PCI_DMA_FROMDEVICE); 720 DMA_FROM_DEVICE);
376 721
377 rx_desc = E1000_RX_DESC(*rx_ring, i); 722 rx_desc = E1000_RX_DESC(*rx_ring, i);
378 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 723 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -446,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
446 791
447 cleaned = 1; 792 cleaned = 1;
448 cleaned_count++; 793 cleaned_count++;
449 pci_unmap_single(pdev, 794 dma_unmap_single(&pdev->dev,
450 buffer_info->dma, 795 buffer_info->dma,
451 adapter->rx_buffer_len, 796 adapter->rx_buffer_len,
452 PCI_DMA_FROMDEVICE); 797 DMA_FROM_DEVICE);
453 buffer_info->dma = 0; 798 buffer_info->dma = 0;
454 799
455 length = le16_to_cpu(rx_desc->length); 800 length = le16_to_cpu(rx_desc->length);
@@ -550,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
550{ 895{
551 if (buffer_info->dma) { 896 if (buffer_info->dma) {
552 if (buffer_info->mapped_as_page) 897 if (buffer_info->mapped_as_page)
553 pci_unmap_page(adapter->pdev, buffer_info->dma, 898 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
554 buffer_info->length, PCI_DMA_TODEVICE); 899 buffer_info->length, DMA_TO_DEVICE);
555 else 900 else
556 pci_unmap_single(adapter->pdev, buffer_info->dma, 901 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
557 buffer_info->length, 902 buffer_info->length, DMA_TO_DEVICE);
558 PCI_DMA_TODEVICE);
559 buffer_info->dma = 0; 903 buffer_info->dma = 0;
560 } 904 }
561 if (buffer_info->skb) { 905 if (buffer_info->skb) {
@@ -646,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
646 cleaned = (i == eop); 990 cleaned = (i == eop);
647 991
648 if (cleaned) { 992 if (cleaned) {
649 struct sk_buff *skb = buffer_info->skb; 993 total_tx_packets += buffer_info->segs;
650 unsigned int segs, bytecount; 994 total_tx_bytes += buffer_info->bytecount;
651 segs = skb_shinfo(skb)->gso_segs ?: 1;
652 /* multiply data chunks by size of headers */
653 bytecount = ((segs - 1) * skb_headlen(skb)) +
654 skb->len;
655 total_tx_packets += segs;
656 total_tx_bytes += bytecount;
657 } 995 }
658 996
659 e1000_put_txbuf(adapter, buffer_info); 997 e1000_put_txbuf(adapter, buffer_info);
@@ -756,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
756 1094
757 cleaned = 1; 1095 cleaned = 1;
758 cleaned_count++; 1096 cleaned_count++;
759 pci_unmap_single(pdev, buffer_info->dma, 1097 dma_unmap_single(&pdev->dev, buffer_info->dma,
760 adapter->rx_ps_bsize0, 1098 adapter->rx_ps_bsize0,
761 PCI_DMA_FROMDEVICE); 1099 DMA_FROM_DEVICE);
762 buffer_info->dma = 0; 1100 buffer_info->dma = 0;
763 1101
764 /* see !EOP comment in other rx routine */ 1102 /* see !EOP comment in other rx routine */
@@ -814,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
814 * kmap_atomic, so we can't hold the mapping 1152 * kmap_atomic, so we can't hold the mapping
815 * very long 1153 * very long
816 */ 1154 */
817 pci_dma_sync_single_for_cpu(pdev, ps_page->dma, 1155 dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
818 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1156 PAGE_SIZE, DMA_FROM_DEVICE);
819 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); 1157 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
820 memcpy(skb_tail_pointer(skb), vaddr, l1); 1158 memcpy(skb_tail_pointer(skb), vaddr, l1);
821 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 1159 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
822 pci_dma_sync_single_for_device(pdev, ps_page->dma, 1160 dma_sync_single_for_device(&pdev->dev, ps_page->dma,
823 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1161 PAGE_SIZE, DMA_FROM_DEVICE);
824 1162
825 /* remove the CRC */ 1163 /* remove the CRC */
826 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 1164 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -837,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
837 break; 1175 break;
838 1176
839 ps_page = &buffer_info->ps_pages[j]; 1177 ps_page = &buffer_info->ps_pages[j];
840 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, 1178 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
841 PCI_DMA_FROMDEVICE); 1179 DMA_FROM_DEVICE);
842 ps_page->dma = 0; 1180 ps_page->dma = 0;
843 skb_fill_page_desc(skb, j, ps_page->page, 0, length); 1181 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
844 ps_page->page = NULL; 1182 ps_page->page = NULL;
@@ -956,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
956 1294
957 cleaned = true; 1295 cleaned = true;
958 cleaned_count++; 1296 cleaned_count++;
959 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, 1297 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
960 PCI_DMA_FROMDEVICE); 1298 DMA_FROM_DEVICE);
961 buffer_info->dma = 0; 1299 buffer_info->dma = 0;
962 1300
963 length = le16_to_cpu(rx_desc->length); 1301 length = le16_to_cpu(rx_desc->length);
@@ -1093,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1093 buffer_info = &rx_ring->buffer_info[i]; 1431 buffer_info = &rx_ring->buffer_info[i];
1094 if (buffer_info->dma) { 1432 if (buffer_info->dma) {
1095 if (adapter->clean_rx == e1000_clean_rx_irq) 1433 if (adapter->clean_rx == e1000_clean_rx_irq)
1096 pci_unmap_single(pdev, buffer_info->dma, 1434 dma_unmap_single(&pdev->dev, buffer_info->dma,
1097 adapter->rx_buffer_len, 1435 adapter->rx_buffer_len,
1098 PCI_DMA_FROMDEVICE); 1436 DMA_FROM_DEVICE);
1099 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1437 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1100 pci_unmap_page(pdev, buffer_info->dma, 1438 dma_unmap_page(&pdev->dev, buffer_info->dma,
1101 PAGE_SIZE, 1439 PAGE_SIZE,
1102 PCI_DMA_FROMDEVICE); 1440 DMA_FROM_DEVICE);
1103 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1441 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1104 pci_unmap_single(pdev, buffer_info->dma, 1442 dma_unmap_single(&pdev->dev, buffer_info->dma,
1105 adapter->rx_ps_bsize0, 1443 adapter->rx_ps_bsize0,
1106 PCI_DMA_FROMDEVICE); 1444 DMA_FROM_DEVICE);
1107 buffer_info->dma = 0; 1445 buffer_info->dma = 0;
1108 } 1446 }
1109 1447
@@ -1121,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1121 ps_page = &buffer_info->ps_pages[j]; 1459 ps_page = &buffer_info->ps_pages[j];
1122 if (!ps_page->page) 1460 if (!ps_page->page)
1123 break; 1461 break;
1124 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, 1462 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1125 PCI_DMA_FROMDEVICE); 1463 DMA_FROM_DEVICE);
1126 ps_page->dma = 0; 1464 ps_page->dma = 0;
1127 put_page(ps_page->page); 1465 put_page(ps_page->page);
1128 ps_page->page = NULL; 1466 ps_page->page = NULL;
@@ -3732,6 +4070,22 @@ link_up:
3732 } 4070 }
3733 } 4071 }
3734 4072
4073 /* Simple mode for Interrupt Throttle Rate (ITR) */
4074 if (adapter->itr_setting == 4) {
4075 /*
4076 * Symmetric Tx/Rx gets a reduced ITR=2000;
4077 * Total asymmetrical Tx or Rx gets ITR=8000;
4078 * everyone else is between 2000-8000.
4079 */
4080 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4081 u32 dif = (adapter->gotc > adapter->gorc ?
4082 adapter->gotc - adapter->gorc :
4083 adapter->gorc - adapter->gotc) / 10000;
4084 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4085
4086 ew32(ITR, 1000000000 / (itr * 256));
4087 }
4088
3735 /* Cause software interrupt to ensure Rx ring is cleaned */ 4089 /* Cause software interrupt to ensure Rx ring is cleaned */
3736 if (adapter->msix_entries) 4090 if (adapter->msix_entries)
3737 ew32(ICS, adapter->rx_ring->ims_val); 4091 ew32(ICS, adapter->rx_ring->ims_val);
@@ -3906,7 +4260,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3906 struct e1000_buffer *buffer_info; 4260 struct e1000_buffer *buffer_info;
3907 unsigned int len = skb_headlen(skb); 4261 unsigned int len = skb_headlen(skb);
3908 unsigned int offset = 0, size, count = 0, i; 4262 unsigned int offset = 0, size, count = 0, i;
3909 unsigned int f; 4263 unsigned int f, bytecount, segs;
3910 4264
3911 i = tx_ring->next_to_use; 4265 i = tx_ring->next_to_use;
3912 4266
@@ -3917,10 +4271,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3917 buffer_info->length = size; 4271 buffer_info->length = size;
3918 buffer_info->time_stamp = jiffies; 4272 buffer_info->time_stamp = jiffies;
3919 buffer_info->next_to_watch = i; 4273 buffer_info->next_to_watch = i;
3920 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 4274 buffer_info->dma = dma_map_single(&pdev->dev,
3921 size, PCI_DMA_TODEVICE); 4275 skb->data + offset,
4276 size, DMA_TO_DEVICE);
3922 buffer_info->mapped_as_page = false; 4277 buffer_info->mapped_as_page = false;
3923 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 4278 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
3924 goto dma_error; 4279 goto dma_error;
3925 4280
3926 len -= size; 4281 len -= size;
@@ -3952,11 +4307,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3952 buffer_info->length = size; 4307 buffer_info->length = size;
3953 buffer_info->time_stamp = jiffies; 4308 buffer_info->time_stamp = jiffies;
3954 buffer_info->next_to_watch = i; 4309 buffer_info->next_to_watch = i;
3955 buffer_info->dma = pci_map_page(pdev, frag->page, 4310 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
3956 offset, size, 4311 offset, size,
3957 PCI_DMA_TODEVICE); 4312 DMA_TO_DEVICE);
3958 buffer_info->mapped_as_page = true; 4313 buffer_info->mapped_as_page = true;
3959 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 4314 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
3960 goto dma_error; 4315 goto dma_error;
3961 4316
3962 len -= size; 4317 len -= size;
@@ -3965,7 +4320,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3965 } 4320 }
3966 } 4321 }
3967 4322
4323 segs = skb_shinfo(skb)->gso_segs ?: 1;
4324 /* multiply data chunks by size of headers */
4325 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4326
3968 tx_ring->buffer_info[i].skb = skb; 4327 tx_ring->buffer_info[i].skb = skb;
4328 tx_ring->buffer_info[i].segs = segs;
4329 tx_ring->buffer_info[i].bytecount = bytecount;
3969 tx_ring->buffer_info[first].next_to_watch = i; 4330 tx_ring->buffer_info[first].next_to_watch = i;
3970 4331
3971 return count; 4332 return count;
@@ -4268,6 +4629,8 @@ static void e1000_reset_task(struct work_struct *work)
4268 struct e1000_adapter *adapter; 4629 struct e1000_adapter *adapter;
4269 adapter = container_of(work, struct e1000_adapter, reset_task); 4630 adapter = container_of(work, struct e1000_adapter, reset_task);
4270 4631
4632 e1000e_dump(adapter);
4633 e_err("Reset adapter\n");
4271 e1000e_reinit_locked(adapter); 4634 e1000e_reinit_locked(adapter);
4272} 4635}
4273 4636
@@ -4310,6 +4673,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4310 return -EINVAL; 4673 return -EINVAL;
4311 } 4674 }
4312 4675
4676 /* 82573 Errata 17 */
4677 if (((adapter->hw.mac.type == e1000_82573) ||
4678 (adapter->hw.mac.type == e1000_82574)) &&
4679 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4680 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4681 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4682 }
4683
4313 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4684 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4314 msleep(1); 4685 msleep(1);
4315 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 4686 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4634,29 +5005,42 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
4634 } 5005 }
4635} 5006}
4636 5007
4637static void e1000e_disable_l1aspm(struct pci_dev *pdev) 5008#ifdef CONFIG_PCIEASPM
5009static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5010{
5011 pci_disable_link_state(pdev, state);
5012}
5013#else
5014static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4638{ 5015{
4639 int pos; 5016 int pos;
4640 u16 val; 5017 u16 reg16;
4641 5018
4642 /* 5019 /*
4643 * 82573 workaround - disable L1 ASPM on mobile chipsets 5020 * Both device and parent should have the same ASPM setting.
4644 * 5021 * Disable ASPM in downstream component first and then upstream.
4645 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
4646 * resulting in lost data or garbage information on the pci-e link
4647 * level. This could result in (false) bad EEPROM checksum errors,
4648 * long ping times (up to 2s) or even a system freeze/hang.
4649 *
4650 * Unfortunately this feature saves about 1W power consumption when
4651 * active.
4652 */ 5022 */
4653 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 5023 pos = pci_pcie_cap(pdev);
4654 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); 5024 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
4655 if (val & 0x2) { 5025 reg16 &= ~state;
4656 dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); 5026 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
4657 val &= ~0x2; 5027
4658 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); 5028 if (!pdev->bus->self)
4659 } 5029 return;
5030
5031 pos = pci_pcie_cap(pdev->bus->self);
5032 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5033 reg16 &= ~state;
5034 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5035}
5036#endif
5037void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5038{
5039 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5040 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5041 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5042
5043 __e1000e_disable_aspm(pdev, state);
4660} 5044}
4661 5045
4662#ifdef CONFIG_PM_OPS 5046#ifdef CONFIG_PM_OPS
@@ -4672,7 +5056,11 @@ static int __e1000_resume(struct pci_dev *pdev)
4672 struct e1000_hw *hw = &adapter->hw; 5056 struct e1000_hw *hw = &adapter->hw;
4673 u32 err; 5057 u32 err;
4674 5058
4675 e1000e_disable_l1aspm(pdev); 5059 pci_set_power_state(pdev, PCI_D0);
5060 pci_restore_state(pdev);
5061 pci_save_state(pdev);
5062 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5063 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4676 5064
4677 e1000e_set_interrupt_capability(adapter); 5065 e1000e_set_interrupt_capability(adapter);
4678 if (netif_running(netdev)) { 5066 if (netif_running(netdev)) {
@@ -4877,7 +5265,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4877 int err; 5265 int err;
4878 pci_ers_result_t result; 5266 pci_ers_result_t result;
4879 5267
4880 e1000e_disable_l1aspm(pdev); 5268 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5269 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4881 err = pci_enable_device_mem(pdev); 5270 err = pci_enable_device_mem(pdev);
4882 if (err) { 5271 if (err) {
4883 dev_err(&pdev->dev, 5272 dev_err(&pdev->dev,
@@ -4971,13 +5360,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4971 dev_warn(&adapter->pdev->dev, 5360 dev_warn(&adapter->pdev->dev,
4972 "Warning: detected DSPD enabled in EEPROM\n"); 5361 "Warning: detected DSPD enabled in EEPROM\n");
4973 } 5362 }
4974
4975 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4976 if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4977 /* ASPM enable */
4978 dev_warn(&adapter->pdev->dev,
4979 "Warning: detected ASPM enabled in EEPROM\n");
4980 }
4981} 5363}
4982 5364
4983static const struct net_device_ops e1000e_netdev_ops = { 5365static const struct net_device_ops e1000e_netdev_ops = {
@@ -5026,23 +5408,24 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5026 u16 eeprom_data = 0; 5408 u16 eeprom_data = 0;
5027 u16 eeprom_apme_mask = E1000_EEPROM_APME; 5409 u16 eeprom_apme_mask = E1000_EEPROM_APME;
5028 5410
5029 e1000e_disable_l1aspm(pdev); 5411 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5412 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5030 5413
5031 err = pci_enable_device_mem(pdev); 5414 err = pci_enable_device_mem(pdev);
5032 if (err) 5415 if (err)
5033 return err; 5416 return err;
5034 5417
5035 pci_using_dac = 0; 5418 pci_using_dac = 0;
5036 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 5419 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5037 if (!err) { 5420 if (!err) {
5038 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 5421 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5039 if (!err) 5422 if (!err)
5040 pci_using_dac = 1; 5423 pci_using_dac = 1;
5041 } else { 5424 } else {
5042 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5425 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5043 if (err) { 5426 if (err) {
5044 err = pci_set_consistent_dma_mask(pdev, 5427 err = dma_set_coherent_mask(&pdev->dev,
5045 DMA_BIT_MASK(32)); 5428 DMA_BIT_MASK(32));
5046 if (err) { 5429 if (err) {
5047 dev_err(&pdev->dev, "No usable DMA " 5430 dev_err(&pdev->dev, "No usable DMA "
5048 "configuration, aborting\n"); 5431 "configuration, aborting\n");
@@ -5073,6 +5456,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5073 5456
5074 SET_NETDEV_DEV(netdev, &pdev->dev); 5457 SET_NETDEV_DEV(netdev, &pdev->dev);
5075 5458
5459 netdev->irq = pdev->irq;
5460
5076 pci_set_drvdata(pdev, netdev); 5461 pci_set_drvdata(pdev, netdev);
5077 adapter = netdev_priv(netdev); 5462 adapter = netdev_priv(netdev);
5078 hw = &adapter->hw; 5463 hw = &adapter->hw;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index f775a481063d..0f4077c3d538 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
351 adapter->itr_setting = adapter->itr; 351 adapter->itr_setting = adapter->itr;
352 adapter->itr = 20000; 352 adapter->itr = 20000;
353 break; 353 break;
354 case 4:
355 e_info("%s set to simplified (2000-8000 ints) "
356 "mode\n", opt.name);
357 adapter->itr_setting = 4;
358 break;
354 default: 359 default:
355 /* 360 /*
356 * Save the setting, because the dynamic bits 361 * Save the setting, because the dynamic bits
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a950996..0630980a2722 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0102" 43#define DRV_VERSION "EHEA_0103"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3f445efa9482..39774817c3eb 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -791,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
791 cqe_counter++; 791 cqe_counter++;
792 rmb(); 792 rmb();
793 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 793 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
794 ehea_error("Send Completion Error: Resetting port"); 794 ehea_error("Bad send completion status=0x%04X",
795 cqe->status);
796
795 if (netif_msg_tx_err(pr->port)) 797 if (netif_msg_tx_err(pr->port))
796 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 798 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
797 ehea_schedule_port_reset(pr->port); 799
798 break; 800 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
801 ehea_error("Resetting port");
802 ehea_schedule_port_reset(pr->port);
803 break;
804 }
799 } 805 }
800 806
801 if (netif_msg_tx_done(pr->port)) 807 if (netif_msg_tx_done(pr->port))
@@ -901,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
901 struct ehea_eqe *eqe; 907 struct ehea_eqe *eqe;
902 struct ehea_qp *qp; 908 struct ehea_qp *qp;
903 u32 qp_token; 909 u32 qp_token;
910 u64 resource_type, aer, aerr;
911 int reset_port = 0;
904 912
905 eqe = ehea_poll_eq(port->qp_eq); 913 eqe = ehea_poll_eq(port->qp_eq);
906 914
@@ -910,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
910 eqe->entry, qp_token); 918 eqe->entry, qp_token);
911 919
912 qp = port->port_res[qp_token].qp; 920 qp = port->port_res[qp_token].qp;
913 ehea_error_data(port->adapter, qp->fw_handle); 921
922 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
923 &aer, &aerr);
924
925 if (resource_type == EHEA_AER_RESTYPE_QP) {
926 if ((aer & EHEA_AER_RESET_MASK) ||
927 (aerr & EHEA_AERR_RESET_MASK))
928 reset_port = 1;
929 } else
930 reset_port = 1; /* Reset in case of CQ or EQ error */
931
914 eqe = ehea_poll_eq(port->qp_eq); 932 eqe = ehea_poll_eq(port->qp_eq);
915 } 933 }
916 934
917 ehea_schedule_port_reset(port); 935 if (reset_port) {
936 ehea_error("Resetting port");
937 ehea_schedule_port_reset(port);
938 }
918 939
919 return IRQ_HANDLED; 940 return IRQ_HANDLED;
920} 941}
@@ -2868,7 +2889,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2868 int ret, i; 2889 int ret, i;
2869 struct ehea_adapter *adapter; 2890 struct ehea_adapter *adapter;
2870 2891
2871 mutex_lock(&dlpar_mem_lock);
2872 ehea_info("LPAR memory changed - re-initializing driver"); 2892 ehea_info("LPAR memory changed - re-initializing driver");
2873 2893
2874 list_for_each_entry(adapter, &adapter_list, list) 2894 list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2958,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2938 } 2958 }
2939 ehea_info("re-initializing driver complete"); 2959 ehea_info("re-initializing driver complete");
2940out: 2960out:
2941 mutex_unlock(&dlpar_mem_lock);
2942 return; 2961 return;
2943} 2962}
2944 2963
@@ -3521,7 +3540,14 @@ void ehea_crash_handler(void)
3521static int ehea_mem_notifier(struct notifier_block *nb, 3540static int ehea_mem_notifier(struct notifier_block *nb,
3522 unsigned long action, void *data) 3541 unsigned long action, void *data)
3523{ 3542{
3543 int ret = NOTIFY_BAD;
3524 struct memory_notify *arg = data; 3544 struct memory_notify *arg = data;
3545
3546 if (!mutex_trylock(&dlpar_mem_lock)) {
3547 ehea_info("ehea_mem_notifier must not be called parallelized");
3548 goto out;
3549 }
3550
3525 switch (action) { 3551 switch (action) {
3526 case MEM_CANCEL_OFFLINE: 3552 case MEM_CANCEL_OFFLINE:
3527 ehea_info("memory offlining canceled"); 3553 ehea_info("memory offlining canceled");
@@ -3530,14 +3556,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3530 ehea_info("memory is going online"); 3556 ehea_info("memory is going online");
3531 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3557 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3532 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3558 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3533 return NOTIFY_BAD; 3559 goto out_unlock;
3534 ehea_rereg_mrs(NULL); 3560 ehea_rereg_mrs(NULL);
3535 break; 3561 break;
3536 case MEM_GOING_OFFLINE: 3562 case MEM_GOING_OFFLINE:
3537 ehea_info("memory is going offline"); 3563 ehea_info("memory is going offline");
3538 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3564 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3539 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3565 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3540 return NOTIFY_BAD; 3566 goto out_unlock;
3541 ehea_rereg_mrs(NULL); 3567 ehea_rereg_mrs(NULL);
3542 break; 3568 break;
3543 default: 3569 default:
@@ -3545,8 +3571,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3545 } 3571 }
3546 3572
3547 ehea_update_firmware_handles(); 3573 ehea_update_firmware_handles();
3574 ret = NOTIFY_OK;
3548 3575
3549 return NOTIFY_OK; 3576out_unlock:
3577 mutex_unlock(&dlpar_mem_lock);
3578out:
3579 return ret;
3550} 3580}
3551 3581
3552static struct notifier_block ehea_mem_nb = { 3582static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
229 229
230int ehea_destroy_cq(struct ehea_cq *cq) 230int ehea_destroy_cq(struct ehea_cq *cq)
231{ 231{
232 u64 hret; 232 u64 hret, aer, aerr;
233 if (!cq) 233 if (!cq)
234 return 0; 234 return 0;
235 235
236 hcp_epas_dtor(&cq->epas); 236 hcp_epas_dtor(&cq->epas);
237 hret = ehea_destroy_cq_res(cq, NORMAL_FREE); 237 hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
238 if (hret == H_R_STATE) { 238 if (hret == H_R_STATE) {
239 ehea_error_data(cq->adapter, cq->fw_handle); 239 ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
240 hret = ehea_destroy_cq_res(cq, FORCE_FREE); 240 hret = ehea_destroy_cq_res(cq, FORCE_FREE);
241 } 241 }
242 242
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
357 357
358int ehea_destroy_eq(struct ehea_eq *eq) 358int ehea_destroy_eq(struct ehea_eq *eq)
359{ 359{
360 u64 hret; 360 u64 hret, aer, aerr;
361 if (!eq) 361 if (!eq)
362 return 0; 362 return 0;
363 363
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
365 365
366 hret = ehea_destroy_eq_res(eq, NORMAL_FREE); 366 hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
367 if (hret == H_R_STATE) { 367 if (hret == H_R_STATE) {
368 ehea_error_data(eq->adapter, eq->fw_handle); 368 ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
369 hret = ehea_destroy_eq_res(eq, FORCE_FREE); 369 hret = ehea_destroy_eq_res(eq, FORCE_FREE);
370 } 370 }
371 371
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
540 540
541int ehea_destroy_qp(struct ehea_qp *qp) 541int ehea_destroy_qp(struct ehea_qp *qp)
542{ 542{
543 u64 hret; 543 u64 hret, aer, aerr;
544 if (!qp) 544 if (!qp)
545 return 0; 545 return 0;
546 546
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
548 548
549 hret = ehea_destroy_qp_res(qp, NORMAL_FREE); 549 hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
550 if (hret == H_R_STATE) { 550 if (hret == H_R_STATE) {
551 ehea_error_data(qp->adapter, qp->fw_handle); 551 ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
552 hret = ehea_destroy_qp_res(qp, FORCE_FREE); 552 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
553 } 553 }
554 554
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
986 if (length > EHEA_PAGESIZE) 986 if (length > EHEA_PAGESIZE)
987 length = EHEA_PAGESIZE; 987 length = EHEA_PAGESIZE;
988 988
989 if (type == 0x8) /* Queue Pair */ 989 if (type == EHEA_AER_RESTYPE_QP)
990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " 990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
991 "port=%llX", resource, data[6], data[12], data[22]); 991 "port=%llX", resource, data[6], data[12], data[22]);
992 992 else if (type == EHEA_AER_RESTYPE_CQ)
993 if (type == 0x4) /* Completion Queue */
994 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, 993 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
995 data[6]); 994 data[6]);
996 995 else if (type == EHEA_AER_RESTYPE_EQ)
997 if (type == 0x3) /* Event Queue */
998 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, 996 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
999 data[6]); 997 data[6]);
1000 998
1001 ehea_dump(data, length, "error data"); 999 ehea_dump(data, length, "error data");
1002} 1000}
1003 1001
1004void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) 1002u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1003 u64 *aer, u64 *aerr)
1005{ 1004{
1006 unsigned long ret; 1005 unsigned long ret;
1007 u64 *rblock; 1006 u64 *rblock;
1007 u64 type = 0;
1008 1008
1009 rblock = (void *)get_zeroed_page(GFP_KERNEL); 1009 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1010 if (!rblock) { 1010 if (!rblock) {
1011 ehea_error("Cannot allocate rblock memory."); 1011 ehea_error("Cannot allocate rblock memory.");
1012 return; 1012 goto out;
1013 } 1013 }
1014 1014
1015 ret = ehea_h_error_data(adapter->handle, 1015 ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
1016 res_handle,
1017 rblock);
1018 1016
1019 if (ret == H_R_STATE) 1017 if (ret == H_SUCCESS) {
1020 ehea_error("No error data is available: %llX.", res_handle); 1018 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
1021 else if (ret == H_SUCCESS) 1019 *aer = rblock[6];
1020 *aerr = rblock[12];
1022 print_error_data(rblock); 1021 print_error_data(rblock);
1023 else 1022 } else if (ret == H_R_STATE) {
1023 ehea_error("No error data available: %llX.", res_handle);
1024 } else
1024 ehea_error("Error data could not be fetched: %llX", res_handle); 1025 ehea_error("Error data could not be fetched: %llX", res_handle);
1025 1026
1026 free_page((unsigned long)rblock); 1027 free_page((unsigned long)rblock);
1028out:
1029 return type;
1027} 1030}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
154#define EHEA_CQE_STAT_ERR_IP 0x2000 154#define EHEA_CQE_STAT_ERR_IP 0x2000
155#define EHEA_CQE_STAT_ERR_CRC 0x1000 155#define EHEA_CQE_STAT_ERR_CRC 0x1000
156 156
157/* Defines which bad send cqe stati lead to a port reset */
158#define EHEA_CQE_STAT_RESET_MASK 0x0002
159
157struct ehea_cqe { 160struct ehea_cqe {
158 u64 wr_id; /* work request ID from WQE */ 161 u64 wr_id; /* work request ID from WQE */
159 u8 type; 162 u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
187#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55) 190#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
188#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63) 191#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
189 192
193#define EHEA_AER_RESTYPE_QP 0x8
194#define EHEA_AER_RESTYPE_CQ 0x4
195#define EHEA_AER_RESTYPE_EQ 0x3
196
197/* Defines which affiliated errors lead to a port reset */
198#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
199#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
200
190struct ehea_eqe { 201struct ehea_eqe {
191 u64 entry; 202 u64 entry;
192}; 203};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
379 390
380int ehea_rem_mr(struct ehea_mr *mr); 391int ehea_rem_mr(struct ehea_mr *mr);
381 392
382void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 393u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
394 u64 *aer, u64 *aerr);
383 395
384int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages); 396int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
385int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages); 397int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6bd03c8b8886..ad1bc7317df6 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1040,7 +1040,6 @@ static int ethoc_probe(struct platform_device *pdev)
1040 netdev->features |= 0; 1040 netdev->features |= 0;
1041 1041
1042 /* setup NAPI */ 1042 /* setup NAPI */
1043 memset(&priv->napi, 0, sizeof(priv->napi));
1044 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1043 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1045 1044
1046 spin_lock_init(&priv->rx_lock); 1045 spin_lock_init(&priv->rx_lock);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a1c0e7bb70e8..e282d0ae6a3d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1104 1104
1105static void nv_napi_enable(struct net_device *dev) 1105static void nv_napi_enable(struct net_device *dev)
1106{ 1106{
1107#ifdef CONFIG_FORCEDETH_NAPI
1108 struct fe_priv *np = get_nvpriv(dev); 1107 struct fe_priv *np = get_nvpriv(dev);
1109 1108
1110 napi_enable(&np->napi); 1109 napi_enable(&np->napi);
1111#endif
1112} 1110}
1113 1111
1114static void nv_napi_disable(struct net_device *dev) 1112static void nv_napi_disable(struct net_device *dev)
1115{ 1113{
1116#ifdef CONFIG_FORCEDETH_NAPI
1117 struct fe_priv *np = get_nvpriv(dev); 1114 struct fe_priv *np = get_nvpriv(dev);
1118 1115
1119 napi_disable(&np->napi); 1116 napi_disable(&np->napi);
1120#endif
1121} 1117}
1122 1118
1123#define MII_READ (-1) 1119#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1810} 1806}
1811 1807
1812/* If rx bufs are exhausted called after 50ms to attempt to refresh */ 1808/* If rx bufs are exhausted called after 50ms to attempt to refresh */
1813#ifdef CONFIG_FORCEDETH_NAPI
1814static void nv_do_rx_refill(unsigned long data) 1809static void nv_do_rx_refill(unsigned long data)
1815{ 1810{
1816 struct net_device *dev = (struct net_device *) data; 1811 struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
1819 /* Just reschedule NAPI rx processing */ 1814 /* Just reschedule NAPI rx processing */
1820 napi_schedule(&np->napi); 1815 napi_schedule(&np->napi);
1821} 1816}
1822#else
1823static void nv_do_rx_refill(unsigned long data)
1824{
1825 struct net_device *dev = (struct net_device *) data;
1826 struct fe_priv *np = netdev_priv(dev);
1827 int retcode;
1828
1829 if (!using_multi_irqs(dev)) {
1830 if (np->msi_flags & NV_MSI_X_ENABLED)
1831 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1832 else
1833 disable_irq(np->pci_dev->irq);
1834 } else {
1835 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1836 }
1837 if (!nv_optimized(np))
1838 retcode = nv_alloc_rx(dev);
1839 else
1840 retcode = nv_alloc_rx_optimized(dev);
1841 if (retcode) {
1842 spin_lock_irq(&np->lock);
1843 if (!np->in_shutdown)
1844 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1845 spin_unlock_irq(&np->lock);
1846 }
1847 if (!using_multi_irqs(dev)) {
1848 if (np->msi_flags & NV_MSI_X_ENABLED)
1849 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1850 else
1851 enable_irq(np->pci_dev->irq);
1852 } else {
1853 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1854 }
1855}
1856#endif
1857 1817
1858static void nv_init_rx(struct net_device *dev) 1818static void nv_init_rx(struct net_device *dev)
1859{ 1819{
@@ -2816,11 +2776,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2816 skb->protocol = eth_type_trans(skb, dev); 2776 skb->protocol = eth_type_trans(skb, dev);
2817 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2777 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2818 dev->name, len, skb->protocol); 2778 dev->name, len, skb->protocol);
2819#ifdef CONFIG_FORCEDETH_NAPI 2779 napi_gro_receive(&np->napi, skb);
2820 netif_receive_skb(skb);
2821#else
2822 netif_rx(skb);
2823#endif
2824 dev->stats.rx_packets++; 2780 dev->stats.rx_packets++;
2825 dev->stats.rx_bytes += len; 2781 dev->stats.rx_bytes += len;
2826next_pkt: 2782next_pkt:
@@ -2909,27 +2865,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2909 dev->name, len, skb->protocol); 2865 dev->name, len, skb->protocol);
2910 2866
2911 if (likely(!np->vlangrp)) { 2867 if (likely(!np->vlangrp)) {
2912#ifdef CONFIG_FORCEDETH_NAPI 2868 napi_gro_receive(&np->napi, skb);
2913 netif_receive_skb(skb);
2914#else
2915 netif_rx(skb);
2916#endif
2917 } else { 2869 } else {
2918 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2870 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2919 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2871 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2920#ifdef CONFIG_FORCEDETH_NAPI 2872 vlan_gro_receive(&np->napi, np->vlangrp,
2921 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2873 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
2922 vlanflags & NV_RX3_VLAN_TAG_MASK);
2923#else
2924 vlan_hwaccel_rx(skb, np->vlangrp,
2925 vlanflags & NV_RX3_VLAN_TAG_MASK);
2926#endif
2927 } else { 2874 } else {
2928#ifdef CONFIG_FORCEDETH_NAPI 2875 napi_gro_receive(&np->napi, skb);
2929 netif_receive_skb(skb);
2930#else
2931 netif_rx(skb);
2932#endif
2933 } 2876 }
2934 } 2877 }
2935 2878
@@ -3496,10 +3439,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3496 struct net_device *dev = (struct net_device *) data; 3439 struct net_device *dev = (struct net_device *) data;
3497 struct fe_priv *np = netdev_priv(dev); 3440 struct fe_priv *np = netdev_priv(dev);
3498 u8 __iomem *base = get_hwbase(dev); 3441 u8 __iomem *base = get_hwbase(dev);
3499#ifndef CONFIG_FORCEDETH_NAPI
3500 int total_work = 0;
3501 int loop_count = 0;
3502#endif
3503 3442
3504 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3443 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3505 3444
@@ -3516,7 +3455,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3516 3455
3517 nv_msi_workaround(np); 3456 nv_msi_workaround(np);
3518 3457
3519#ifdef CONFIG_FORCEDETH_NAPI
3520 if (napi_schedule_prep(&np->napi)) { 3458 if (napi_schedule_prep(&np->napi)) {
3521 /* 3459 /*
3522 * Disable further irq's (msix not enabled with napi) 3460 * Disable further irq's (msix not enabled with napi)
@@ -3525,65 +3463,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3525 __napi_schedule(&np->napi); 3463 __napi_schedule(&np->napi);
3526 } 3464 }
3527 3465
3528#else
3529 do
3530 {
3531 int work = 0;
3532 if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
3533 if (unlikely(nv_alloc_rx(dev))) {
3534 spin_lock(&np->lock);
3535 if (!np->in_shutdown)
3536 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3537 spin_unlock(&np->lock);
3538 }
3539 }
3540
3541 spin_lock(&np->lock);
3542 work += nv_tx_done(dev, TX_WORK_PER_LOOP);
3543 spin_unlock(&np->lock);
3544
3545 if (!work)
3546 break;
3547
3548 total_work += work;
3549
3550 loop_count++;
3551 }
3552 while (loop_count < max_interrupt_work);
3553
3554 if (nv_change_interrupt_mode(dev, total_work)) {
3555 /* setup new irq mask */
3556 writel(np->irqmask, base + NvRegIrqMask);
3557 }
3558
3559 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3560 spin_lock(&np->lock);
3561 nv_link_irq(dev);
3562 spin_unlock(&np->lock);
3563 }
3564 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3565 spin_lock(&np->lock);
3566 nv_linkchange(dev);
3567 spin_unlock(&np->lock);
3568 np->link_timeout = jiffies + LINK_TIMEOUT;
3569 }
3570 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3571 spin_lock(&np->lock);
3572 /* disable interrupts on the nic */
3573 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3574 writel(0, base + NvRegIrqMask);
3575 else
3576 writel(np->irqmask, base + NvRegIrqMask);
3577 pci_push(base);
3578
3579 if (!np->in_shutdown) {
3580 np->nic_poll_irq = np->irqmask;
3581 np->recover_error = 1;
3582 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3583 }
3584 spin_unlock(&np->lock);
3585 }
3586#endif
3587 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3466 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3588 3467
3589 return IRQ_HANDLED; 3468 return IRQ_HANDLED;
@@ -3599,10 +3478,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3599 struct net_device *dev = (struct net_device *) data; 3478 struct net_device *dev = (struct net_device *) data;
3600 struct fe_priv *np = netdev_priv(dev); 3479 struct fe_priv *np = netdev_priv(dev);
3601 u8 __iomem *base = get_hwbase(dev); 3480 u8 __iomem *base = get_hwbase(dev);
3602#ifndef CONFIG_FORCEDETH_NAPI
3603 int total_work = 0;
3604 int loop_count = 0;
3605#endif
3606 3481
3607 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3482 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3608 3483
@@ -3619,7 +3494,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3619 3494
3620 nv_msi_workaround(np); 3495 nv_msi_workaround(np);
3621 3496
3622#ifdef CONFIG_FORCEDETH_NAPI
3623 if (napi_schedule_prep(&np->napi)) { 3497 if (napi_schedule_prep(&np->napi)) {
3624 /* 3498 /*
3625 * Disable further irq's (msix not enabled with napi) 3499 * Disable further irq's (msix not enabled with napi)
@@ -3627,66 +3501,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3627 writel(0, base + NvRegIrqMask); 3501 writel(0, base + NvRegIrqMask);
3628 __napi_schedule(&np->napi); 3502 __napi_schedule(&np->napi);
3629 } 3503 }
3630#else
3631 do
3632 {
3633 int work = 0;
3634 if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
3635 if (unlikely(nv_alloc_rx_optimized(dev))) {
3636 spin_lock(&np->lock);
3637 if (!np->in_shutdown)
3638 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3639 spin_unlock(&np->lock);
3640 }
3641 }
3642
3643 spin_lock(&np->lock);
3644 work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3645 spin_unlock(&np->lock);
3646
3647 if (!work)
3648 break;
3649
3650 total_work += work;
3651
3652 loop_count++;
3653 }
3654 while (loop_count < max_interrupt_work);
3655
3656 if (nv_change_interrupt_mode(dev, total_work)) {
3657 /* setup new irq mask */
3658 writel(np->irqmask, base + NvRegIrqMask);
3659 }
3660
3661 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3662 spin_lock(&np->lock);
3663 nv_link_irq(dev);
3664 spin_unlock(&np->lock);
3665 }
3666 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3667 spin_lock(&np->lock);
3668 nv_linkchange(dev);
3669 spin_unlock(&np->lock);
3670 np->link_timeout = jiffies + LINK_TIMEOUT;
3671 }
3672 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3673 spin_lock(&np->lock);
3674 /* disable interrupts on the nic */
3675 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3676 writel(0, base + NvRegIrqMask);
3677 else
3678 writel(np->irqmask, base + NvRegIrqMask);
3679 pci_push(base);
3680
3681 if (!np->in_shutdown) {
3682 np->nic_poll_irq = np->irqmask;
3683 np->recover_error = 1;
3684 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3685 }
3686 spin_unlock(&np->lock);
3687 }
3688
3689#endif
3690 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3504 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3691 3505
3692 return IRQ_HANDLED; 3506 return IRQ_HANDLED;
@@ -3735,7 +3549,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3735 return IRQ_RETVAL(i); 3549 return IRQ_RETVAL(i);
3736} 3550}
3737 3551
3738#ifdef CONFIG_FORCEDETH_NAPI
3739static int nv_napi_poll(struct napi_struct *napi, int budget) 3552static int nv_napi_poll(struct napi_struct *napi, int budget)
3740{ 3553{
3741 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3554 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3743,23 +3556,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3743 u8 __iomem *base = get_hwbase(dev); 3556 u8 __iomem *base = get_hwbase(dev);
3744 unsigned long flags; 3557 unsigned long flags;
3745 int retcode; 3558 int retcode;
3746 int tx_work, rx_work; 3559 int rx_count, tx_work=0, rx_work=0;
3747 3560
3748 if (!nv_optimized(np)) { 3561 do {
3749 spin_lock_irqsave(&np->lock, flags); 3562 if (!nv_optimized(np)) {
3750 tx_work = nv_tx_done(dev, np->tx_ring_size); 3563 spin_lock_irqsave(&np->lock, flags);
3751 spin_unlock_irqrestore(&np->lock, flags); 3564 tx_work += nv_tx_done(dev, np->tx_ring_size);
3565 spin_unlock_irqrestore(&np->lock, flags);
3752 3566
3753 rx_work = nv_rx_process(dev, budget); 3567 rx_count = nv_rx_process(dev, budget - rx_work);
3754 retcode = nv_alloc_rx(dev); 3568 retcode = nv_alloc_rx(dev);
3755 } else { 3569 } else {
3756 spin_lock_irqsave(&np->lock, flags); 3570 spin_lock_irqsave(&np->lock, flags);
3757 tx_work = nv_tx_done_optimized(dev, np->tx_ring_size); 3571 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3758 spin_unlock_irqrestore(&np->lock, flags); 3572 spin_unlock_irqrestore(&np->lock, flags);
3759 3573
3760 rx_work = nv_rx_process_optimized(dev, budget); 3574 rx_count = nv_rx_process_optimized(dev,
3761 retcode = nv_alloc_rx_optimized(dev); 3575 budget - rx_work);
3762 } 3576 retcode = nv_alloc_rx_optimized(dev);
3577 }
3578 } while (retcode == 0 &&
3579 rx_count > 0 && (rx_work += rx_count) < budget);
3763 3580
3764 if (retcode) { 3581 if (retcode) {
3765 spin_lock_irqsave(&np->lock, flags); 3582 spin_lock_irqsave(&np->lock, flags);
@@ -3802,7 +3619,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3802 } 3619 }
3803 return rx_work; 3620 return rx_work;
3804} 3621}
3805#endif
3806 3622
3807static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3623static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3808{ 3624{
@@ -5708,6 +5524,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5708 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5524 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5709 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 5525 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5710 dev->features |= NETIF_F_TSO; 5526 dev->features |= NETIF_F_TSO;
5527 dev->features |= NETIF_F_GRO;
5711 } 5528 }
5712 5529
5713 np->vlanctl_bits = 0; 5530 np->vlanctl_bits = 0;
@@ -5760,9 +5577,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5760 else 5577 else
5761 dev->netdev_ops = &nv_netdev_ops_optimized; 5578 dev->netdev_ops = &nv_netdev_ops_optimized;
5762 5579
5763#ifdef CONFIG_FORCEDETH_NAPI
5764 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5580 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5765#endif
5766 SET_ETHTOOL_OPS(dev, &ops); 5581 SET_ETHTOOL_OPS(dev, &ops);
5767 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5582 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5768 5583
@@ -5865,7 +5680,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5865 /* msix has had reported issues when modifying irqmask 5680 /* msix has had reported issues when modifying irqmask
5866 as in the case of napi, therefore, disable for now 5681 as in the case of napi, therefore, disable for now
5867 */ 5682 */
5868#ifndef CONFIG_FORCEDETH_NAPI 5683#if 0
5869 np->msi_flags |= NV_MSI_X_CAPABLE; 5684 np->msi_flags |= NV_MSI_X_CAPABLE;
5870#endif 5685#endif
5871 } 5686 }
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d5160edf2fcf..3acac5f930c8 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
206{ 206{
207 struct gfar __iomem *enet_regs; 207 struct gfar __iomem *enet_regs;
208 u32 __iomem *ioremap_tbipa;
209 u64 addr, size;
210 208
211 /* 209 /*
212 * This is mildly evil, but so is our hardware for doing this. 210 * This is mildly evil, but so is our hardware for doing this.
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
220 return &enet_regs->tbipa; 218 return &enet_regs->tbipa;
221 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || 219 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
222 of_device_is_compatible(np, "fsl,etsec2-tbi")) { 220 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
223 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL)); 221 return of_iomap(np, 1);
224 ioremap_tbipa = ioremap(addr, size);
225 return ioremap_tbipa;
226 } else 222 } else
227 return NULL; 223 return NULL;
228} 224}
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
279 u32 __iomem *tbipa; 275 u32 __iomem *tbipa;
280 struct mii_bus *new_bus; 276 struct mii_bus *new_bus;
281 int tbiaddr = -1; 277 int tbiaddr = -1;
278 const u32 *addrp;
282 u64 addr = 0, size = 0; 279 u64 addr = 0, size = 0;
283 int err = 0; 280 int err = 0;
284 281
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
297 new_bus->priv = priv; 294 new_bus->priv = priv;
298 fsl_pq_mdio_bus_name(new_bus->id, np); 295 fsl_pq_mdio_bus_name(new_bus->id, np);
299 296
297 addrp = of_get_address(np, 0, &size, NULL);
298 if (!addrp) {
299 err = -EINVAL;
300 goto err_free_bus;
301 }
302
300 /* Set the PHY base address */ 303 /* Set the PHY base address */
301 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 304 addr = of_translate_address(np, addrp);
305 if (addr == OF_BAD_ADDR) {
306 err = -EINVAL;
307 goto err_free_bus;
308 }
309
302 map = ioremap(addr, size); 310 map = ioremap(addr, size);
303 if (!map) { 311 if (!map) {
304 err = -ENOMEM; 312 err = -ENOMEM;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 032073d1e3d2..5267c27e3174 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -558,12 +558,8 @@ static int gfar_parse_group(struct device_node *np,
558 struct gfar_private *priv, const char *model) 558 struct gfar_private *priv, const char *model)
559{ 559{
560 u32 *queue_mask; 560 u32 *queue_mask;
561 u64 addr, size;
562
563 addr = of_translate_address(np,
564 of_get_address(np, 0, &size, NULL));
565 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
566 561
562 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
567 if (!priv->gfargrp[priv->num_grps].regs) 563 if (!priv->gfargrp[priv->num_grps].regs)
568 return -ENOMEM; 564 return -ENOMEM;
569 565
@@ -1571,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
1571 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1567 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1572 gfar_write(&regs->dmactrl, tempval); 1568 gfar_write(&regs->dmactrl, tempval);
1573 1569
1574 while (!(gfar_read(&regs->ievent) & 1570 spin_event_timeout(((gfar_read(&regs->ievent) &
1575 (IEVENT_GRSC | IEVENT_GTSC))) 1571 (IEVENT_GRSC | IEVENT_GTSC)) ==
1576 cpu_relax(); 1572 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
1577 } 1573 }
1578} 1574}
1579 1575
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 31d24e0e76de..24d9be64342f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
610#define IGP_LED3_MODE 0x07000000 610#define IGP_LED3_MODE 0x07000000
611 611
612/* PCI/PCI-X/PCI-EX Config space */ 612/* PCI/PCI-X/PCI-EX Config space */
613#define PCIE_LINK_STATUS 0x12
614#define PCIE_DEVICE_CONTROL2 0x28 613#define PCIE_DEVICE_CONTROL2 0x28
615
616#define PCIE_LINK_WIDTH_MASK 0x3F0
617#define PCIE_LINK_WIDTH_SHIFT 4
618#define PCIE_DEVICE_CONTROL2_16ms 0x0005 614#define PCIE_DEVICE_CONTROL2_16ms 0x0005
619 615
620#define PHY_REVISION_MASK 0xFFFFFFF0 616#define PHY_REVISION_MASK 0xFFFFFFF0
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010e4021..90c5e01e9235 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
53 u16 pcie_link_status; 53 u16 pcie_link_status;
54 54
55 bus->type = e1000_bus_type_pci_express; 55 bus->type = e1000_bus_type_pci_express;
56 bus->speed = e1000_bus_speed_2500;
57 56
58 ret_val = igb_read_pcie_cap_reg(hw, 57 ret_val = igb_read_pcie_cap_reg(hw,
59 PCIE_LINK_STATUS, 58 PCI_EXP_LNKSTA,
60 &pcie_link_status); 59 &pcie_link_status);
61 if (ret_val) 60 if (ret_val) {
62 bus->width = e1000_bus_width_unknown; 61 bus->width = e1000_bus_width_unknown;
63 else 62 bus->speed = e1000_bus_speed_unknown;
63 } else {
64 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
65 case PCI_EXP_LNKSTA_CLS_2_5GB:
66 bus->speed = e1000_bus_speed_2500;
67 break;
68 case PCI_EXP_LNKSTA_CLS_5_0GB:
69 bus->speed = e1000_bus_speed_5000;
70 break;
71 default:
72 bus->speed = e1000_bus_speed_unknown;
73 break;
74 }
75
64 bus->width = (enum e1000_bus_width)((pcie_link_status & 76 bus->width = (enum e1000_bus_width)((pcie_link_status &
65 PCIE_LINK_WIDTH_MASK) >> 77 PCI_EXP_LNKSTA_NLW) >>
66 PCIE_LINK_WIDTH_SHIFT); 78 PCI_EXP_LNKSTA_NLW_SHIFT);
79 }
67 80
68 reg = rd32(E1000_STATUS); 81 reg = rd32(E1000_STATUS);
69 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; 82 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7d288ccca1ca..6e63d9a7fc75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -141,8 +141,10 @@ struct igb_buffer {
141 unsigned long time_stamp; 141 unsigned long time_stamp;
142 u16 length; 142 u16 length;
143 u16 next_to_watch; 143 u16 next_to_watch;
144 u16 mapped_as_page; 144 unsigned int bytecount;
145 u16 gso_segs; 145 u16 gso_segs;
146 union skb_shared_tx shtx;
147 u8 mapped_as_page;
146 }; 148 };
147 /* RX */ 149 /* RX */
148 struct { 150 struct {
@@ -186,7 +188,7 @@ struct igb_q_vector {
186struct igb_ring { 188struct igb_ring {
187 struct igb_q_vector *q_vector; /* backlink to q_vector */ 189 struct igb_q_vector *q_vector; /* backlink to q_vector */
188 struct net_device *netdev; /* back pointer to net_device */ 190 struct net_device *netdev; /* back pointer to net_device */
189 struct pci_dev *pdev; /* pci device for dma mapping */ 191 struct device *dev; /* device pointer for dma mapping */
190 dma_addr_t dma; /* phys address of the ring */ 192 dma_addr_t dma; /* phys address of the ring */
191 void *desc; /* descriptor ring memory */ 193 void *desc; /* descriptor ring memory */
192 unsigned int size; /* length of desc. ring in bytes */ 194 unsigned int size; /* length of desc. ring in bytes */
@@ -338,7 +340,6 @@ enum igb_boards {
338extern char igb_driver_name[]; 340extern char igb_driver_name[];
339extern char igb_driver_version[]; 341extern char igb_driver_version[];
340 342
341extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
342extern int igb_up(struct igb_adapter *); 343extern int igb_up(struct igb_adapter *);
343extern void igb_down(struct igb_adapter *); 344extern void igb_down(struct igb_adapter *);
344extern void igb_reinit_locked(struct igb_adapter *); 345extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 1b8fd7f4064d..f2ebf927e4bc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1394,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1394 1394
1395 /* Setup Tx descriptor ring and Tx buffers */ 1395 /* Setup Tx descriptor ring and Tx buffers */
1396 tx_ring->count = IGB_DEFAULT_TXD; 1396 tx_ring->count = IGB_DEFAULT_TXD;
1397 tx_ring->pdev = adapter->pdev; 1397 tx_ring->dev = &adapter->pdev->dev;
1398 tx_ring->netdev = adapter->netdev; 1398 tx_ring->netdev = adapter->netdev;
1399 tx_ring->reg_idx = adapter->vfs_allocated_count; 1399 tx_ring->reg_idx = adapter->vfs_allocated_count;
1400 1400
@@ -1408,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1408 1408
1409 /* Setup Rx descriptor ring and Rx buffers */ 1409 /* Setup Rx descriptor ring and Rx buffers */
1410 rx_ring->count = IGB_DEFAULT_RXD; 1410 rx_ring->count = IGB_DEFAULT_RXD;
1411 rx_ring->pdev = adapter->pdev; 1411 rx_ring->dev = &adapter->pdev->dev;
1412 rx_ring->netdev = adapter->netdev; 1412 rx_ring->netdev = adapter->netdev;
1413 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; 1413 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1414 rx_ring->reg_idx = adapter->vfs_allocated_count; 1414 rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1604,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1604 buffer_info = &rx_ring->buffer_info[rx_ntc]; 1604 buffer_info = &rx_ring->buffer_info[rx_ntc];
1605 1605
1606 /* unmap rx buffer, will be remapped by alloc_rx_buffers */ 1606 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1607 pci_unmap_single(rx_ring->pdev, 1607 dma_unmap_single(rx_ring->dev,
1608 buffer_info->dma, 1608 buffer_info->dma,
1609 rx_ring->rx_buffer_len, 1609 rx_ring->rx_buffer_len,
1610 PCI_DMA_FROMDEVICE); 1610 DMA_FROM_DEVICE);
1611 buffer_info->dma = 0; 1611 buffer_info->dma = 0;
1612 1612
1613 /* verify contents of skb */ 1613 /* verify contents of skb */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c19b1e0caecd..589cf4a6427a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -201,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
201MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
202MODULE_VERSION(DRV_VERSION); 202MODULE_VERSION(DRV_VERSION);
203 203
204struct igb_reg_info {
205 u32 ofs;
206 char *name;
207};
208
209static const struct igb_reg_info igb_reg_info_tbl[] = {
210
211 /* General Registers */
212 {E1000_CTRL, "CTRL"},
213 {E1000_STATUS, "STATUS"},
214 {E1000_CTRL_EXT, "CTRL_EXT"},
215
216 /* Interrupt Registers */
217 {E1000_ICR, "ICR"},
218
219 /* RX Registers */
220 {E1000_RCTL, "RCTL"},
221 {E1000_RDLEN(0), "RDLEN"},
222 {E1000_RDH(0), "RDH"},
223 {E1000_RDT(0), "RDT"},
224 {E1000_RXDCTL(0), "RXDCTL"},
225 {E1000_RDBAL(0), "RDBAL"},
226 {E1000_RDBAH(0), "RDBAH"},
227
228 /* TX Registers */
229 {E1000_TCTL, "TCTL"},
230 {E1000_TDBAL(0), "TDBAL"},
231 {E1000_TDBAH(0), "TDBAH"},
232 {E1000_TDLEN(0), "TDLEN"},
233 {E1000_TDH(0), "TDH"},
234 {E1000_TDT(0), "TDT"},
235 {E1000_TXDCTL(0), "TXDCTL"},
236 {E1000_TDFH, "TDFH"},
237 {E1000_TDFT, "TDFT"},
238 {E1000_TDFHS, "TDFHS"},
239 {E1000_TDFPC, "TDFPC"},
240
241 /* List Terminator */
242 {}
243};
244
245/*
246 * igb_regdump - register printout routine
247 */
248static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
249{
250 int n = 0;
251 char rname[16];
252 u32 regs[8];
253
254 switch (reginfo->ofs) {
255 case E1000_RDLEN(0):
256 for (n = 0; n < 4; n++)
257 regs[n] = rd32(E1000_RDLEN(n));
258 break;
259 case E1000_RDH(0):
260 for (n = 0; n < 4; n++)
261 regs[n] = rd32(E1000_RDH(n));
262 break;
263 case E1000_RDT(0):
264 for (n = 0; n < 4; n++)
265 regs[n] = rd32(E1000_RDT(n));
266 break;
267 case E1000_RXDCTL(0):
268 for (n = 0; n < 4; n++)
269 regs[n] = rd32(E1000_RXDCTL(n));
270 break;
271 case E1000_RDBAL(0):
272 for (n = 0; n < 4; n++)
273 regs[n] = rd32(E1000_RDBAL(n));
274 break;
275 case E1000_RDBAH(0):
276 for (n = 0; n < 4; n++)
277 regs[n] = rd32(E1000_RDBAH(n));
278 break;
279 case E1000_TDBAL(0):
280 for (n = 0; n < 4; n++)
281 regs[n] = rd32(E1000_RDBAL(n));
282 break;
283 case E1000_TDBAH(0):
284 for (n = 0; n < 4; n++)
285 regs[n] = rd32(E1000_TDBAH(n));
286 break;
287 case E1000_TDLEN(0):
288 for (n = 0; n < 4; n++)
289 regs[n] = rd32(E1000_TDLEN(n));
290 break;
291 case E1000_TDH(0):
292 for (n = 0; n < 4; n++)
293 regs[n] = rd32(E1000_TDH(n));
294 break;
295 case E1000_TDT(0):
296 for (n = 0; n < 4; n++)
297 regs[n] = rd32(E1000_TDT(n));
298 break;
299 case E1000_TXDCTL(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_TXDCTL(n));
302 break;
303 default:
304 printk(KERN_INFO "%-15s %08x\n",
305 reginfo->name, rd32(reginfo->ofs));
306 return;
307 }
308
309 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
310 printk(KERN_INFO "%-15s ", rname);
311 for (n = 0; n < 4; n++)
312 printk(KERN_CONT "%08x ", regs[n]);
313 printk(KERN_CONT "\n");
314}
315
316/*
317 * igb_dump - Print registers, tx-rings and rx-rings
318 */
319static void igb_dump(struct igb_adapter *adapter)
320{
321 struct net_device *netdev = adapter->netdev;
322 struct e1000_hw *hw = &adapter->hw;
323 struct igb_reg_info *reginfo;
324 int n = 0;
325 struct igb_ring *tx_ring;
326 union e1000_adv_tx_desc *tx_desc;
327 struct my_u0 { u64 a; u64 b; } *u0;
328 struct igb_buffer *buffer_info;
329 struct igb_ring *rx_ring;
330 union e1000_adv_rx_desc *rx_desc;
331 u32 staterr;
332 int i = 0;
333
334 if (!netif_msg_hw(adapter))
335 return;
336
337 /* Print netdevice Info */
338 if (netdev) {
339 dev_info(&adapter->pdev->dev, "Net device Info\n");
340 printk(KERN_INFO "Device Name state "
341 "trans_start last_rx\n");
342 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
343 netdev->name,
344 netdev->state,
345 netdev->trans_start,
346 netdev->last_rx);
347 }
348
349 /* Print Registers */
350 dev_info(&adapter->pdev->dev, "Register Dump\n");
351 printk(KERN_INFO " Register Name Value\n");
352 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
353 reginfo->name; reginfo++) {
354 igb_regdump(hw, reginfo);
355 }
356
357 /* Print TX Ring Summary */
358 if (!netdev || !netif_running(netdev))
359 goto exit;
360
361 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
362 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
363 " leng ntw timestamp\n");
364 for (n = 0; n < adapter->num_tx_queues; n++) {
365 tx_ring = adapter->tx_ring[n];
366 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
367 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
368 n, tx_ring->next_to_use, tx_ring->next_to_clean,
369 (u64)buffer_info->dma,
370 buffer_info->length,
371 buffer_info->next_to_watch,
372 (u64)buffer_info->time_stamp);
373 }
374
375 /* Print TX Rings */
376 if (!netif_msg_tx_done(adapter))
377 goto rx_ring_summary;
378
379 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
380
381 /* Transmit Descriptor Formats
382 *
383 * Advanced Transmit Descriptor
384 * +--------------------------------------------------------------+
385 * 0 | Buffer Address [63:0] |
386 * +--------------------------------------------------------------+
387 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
388 * +--------------------------------------------------------------+
389 * 63 46 45 40 39 38 36 35 32 31 24 15 0
390 */
391
392 for (n = 0; n < adapter->num_tx_queues; n++) {
393 tx_ring = adapter->tx_ring[n];
394 printk(KERN_INFO "------------------------------------\n");
395 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
396 printk(KERN_INFO "------------------------------------\n");
397 printk(KERN_INFO "T [desc] [address 63:0 ] "
398 "[PlPOCIStDDM Ln] [bi->dma ] "
399 "leng ntw timestamp bi->skb\n");
400
401 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
402 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
403 buffer_info = &tx_ring->buffer_info[i];
404 u0 = (struct my_u0 *)tx_desc;
405 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
406 " %04X %3X %016llX %p", i,
407 le64_to_cpu(u0->a),
408 le64_to_cpu(u0->b),
409 (u64)buffer_info->dma,
410 buffer_info->length,
411 buffer_info->next_to_watch,
412 (u64)buffer_info->time_stamp,
413 buffer_info->skb);
414 if (i == tx_ring->next_to_use &&
415 i == tx_ring->next_to_clean)
416 printk(KERN_CONT " NTC/U\n");
417 else if (i == tx_ring->next_to_use)
418 printk(KERN_CONT " NTU\n");
419 else if (i == tx_ring->next_to_clean)
420 printk(KERN_CONT " NTC\n");
421 else
422 printk(KERN_CONT "\n");
423
424 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
425 print_hex_dump(KERN_INFO, "",
426 DUMP_PREFIX_ADDRESS,
427 16, 1, phys_to_virt(buffer_info->dma),
428 buffer_info->length, true);
429 }
430 }
431
432 /* Print RX Rings Summary */
433rx_ring_summary:
434 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
435 printk(KERN_INFO "Queue [NTU] [NTC]\n");
436 for (n = 0; n < adapter->num_rx_queues; n++) {
437 rx_ring = adapter->rx_ring[n];
438 printk(KERN_INFO " %5d %5X %5X\n", n,
439 rx_ring->next_to_use, rx_ring->next_to_clean);
440 }
441
442 /* Print RX Rings */
443 if (!netif_msg_rx_status(adapter))
444 goto exit;
445
446 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
447
448 /* Advanced Receive Descriptor (Read) Format
449 * 63 1 0
450 * +-----------------------------------------------------+
451 * 0 | Packet Buffer Address [63:1] |A0/NSE|
452 * +----------------------------------------------+------+
453 * 8 | Header Buffer Address [63:1] | DD |
454 * +-----------------------------------------------------+
455 *
456 *
457 * Advanced Receive Descriptor (Write-Back) Format
458 *
459 * 63 48 47 32 31 30 21 20 17 16 4 3 0
460 * +------------------------------------------------------+
461 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
462 * | Checksum Ident | | | | Type | Type |
463 * +------------------------------------------------------+
464 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
465 * +------------------------------------------------------+
466 * 63 48 47 32 31 20 19 0
467 */
468
469 for (n = 0; n < adapter->num_rx_queues; n++) {
470 rx_ring = adapter->rx_ring[n];
471 printk(KERN_INFO "------------------------------------\n");
472 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
473 printk(KERN_INFO "------------------------------------\n");
474 printk(KERN_INFO "R [desc] [ PktBuf A0] "
475 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
476 "<-- Adv Rx Read format\n");
477 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
478 "[vl er S cks ln] ---------------- [bi->skb] "
479 "<-- Adv Rx Write-Back format\n");
480
481 for (i = 0; i < rx_ring->count; i++) {
482 buffer_info = &rx_ring->buffer_info[i];
483 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
484 u0 = (struct my_u0 *)rx_desc;
485 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
486 if (staterr & E1000_RXD_STAT_DD) {
487 /* Descriptor Done */
488 printk(KERN_INFO "RWB[0x%03X] %016llX "
489 "%016llX ---------------- %p", i,
490 le64_to_cpu(u0->a),
491 le64_to_cpu(u0->b),
492 buffer_info->skb);
493 } else {
494 printk(KERN_INFO "R [0x%03X] %016llX "
495 "%016llX %016llX %p", i,
496 le64_to_cpu(u0->a),
497 le64_to_cpu(u0->b),
498 (u64)buffer_info->dma,
499 buffer_info->skb);
500
501 if (netif_msg_pktdata(adapter)) {
502 print_hex_dump(KERN_INFO, "",
503 DUMP_PREFIX_ADDRESS,
504 16, 1,
505 phys_to_virt(buffer_info->dma),
506 rx_ring->rx_buffer_len, true);
507 if (rx_ring->rx_buffer_len
508 < IGB_RXBUFFER_1024)
509 print_hex_dump(KERN_INFO, "",
510 DUMP_PREFIX_ADDRESS,
511 16, 1,
512 phys_to_virt(
513 buffer_info->page_dma +
514 buffer_info->page_offset),
515 PAGE_SIZE/2, true);
516 }
517 }
518
519 if (i == rx_ring->next_to_use)
520 printk(KERN_CONT " NTU\n");
521 else if (i == rx_ring->next_to_clean)
522 printk(KERN_CONT " NTC\n");
523 else
524 printk(KERN_CONT "\n");
525
526 }
527 }
528
529exit:
530 return;
531}
532
533
204/** 534/**
205 * igb_read_clock - read raw cycle counter (to be used by time counter) 535 * igb_read_clock - read raw cycle counter (to be used by time counter)
206 */ 536 */
@@ -350,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
350 goto err; 680 goto err;
351 ring->count = adapter->tx_ring_count; 681 ring->count = adapter->tx_ring_count;
352 ring->queue_index = i; 682 ring->queue_index = i;
353 ring->pdev = adapter->pdev; 683 ring->dev = &adapter->pdev->dev;
354 ring->netdev = adapter->netdev; 684 ring->netdev = adapter->netdev;
355 /* For 82575, context index must be unique per ring. */ 685 /* For 82575, context index must be unique per ring. */
356 if (adapter->hw.mac.type == e1000_82575) 686 if (adapter->hw.mac.type == e1000_82575)
@@ -364,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
364 goto err; 694 goto err;
365 ring->count = adapter->rx_ring_count; 695 ring->count = adapter->rx_ring_count;
366 ring->queue_index = i; 696 ring->queue_index = i;
367 ring->pdev = adapter->pdev; 697 ring->dev = &adapter->pdev->dev;
368 ring->netdev = adapter->netdev; 698 ring->netdev = adapter->netdev;
369 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 699 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
370 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ 700 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -1398,15 +1728,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1398 return err; 1728 return err;
1399 1729
1400 pci_using_dac = 0; 1730 pci_using_dac = 0;
1401 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1731 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1402 if (!err) { 1732 if (!err) {
1403 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1733 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1404 if (!err) 1734 if (!err)
1405 pci_using_dac = 1; 1735 pci_using_dac = 1;
1406 } else { 1736 } else {
1407 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1737 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1408 if (err) { 1738 if (err) {
1409 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1739 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1410 if (err) { 1740 if (err) {
1411 dev_err(&pdev->dev, "No usable DMA " 1741 dev_err(&pdev->dev, "No usable DMA "
1412 "configuration, aborting\n"); 1742 "configuration, aborting\n");
@@ -1638,6 +1968,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1638 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1968 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1639 netdev->name, 1969 netdev->name,
1640 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 1970 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1971 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
1641 "unknown"), 1972 "unknown"),
1642 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1973 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1643 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1974 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -2079,7 +2410,7 @@ static int igb_close(struct net_device *netdev)
2079 **/ 2410 **/
2080int igb_setup_tx_resources(struct igb_ring *tx_ring) 2411int igb_setup_tx_resources(struct igb_ring *tx_ring)
2081{ 2412{
2082 struct pci_dev *pdev = tx_ring->pdev; 2413 struct device *dev = tx_ring->dev;
2083 int size; 2414 int size;
2084 2415
2085 size = sizeof(struct igb_buffer) * tx_ring->count; 2416 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2092,9 +2423,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2092 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2423 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2093 tx_ring->size = ALIGN(tx_ring->size, 4096); 2424 tx_ring->size = ALIGN(tx_ring->size, 4096);
2094 2425
2095 tx_ring->desc = pci_alloc_consistent(pdev, 2426 tx_ring->desc = dma_alloc_coherent(dev,
2096 tx_ring->size, 2427 tx_ring->size,
2097 &tx_ring->dma); 2428 &tx_ring->dma,
2429 GFP_KERNEL);
2098 2430
2099 if (!tx_ring->desc) 2431 if (!tx_ring->desc)
2100 goto err; 2432 goto err;
@@ -2105,7 +2437,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2105 2437
2106err: 2438err:
2107 vfree(tx_ring->buffer_info); 2439 vfree(tx_ring->buffer_info);
2108 dev_err(&pdev->dev, 2440 dev_err(dev,
2109 "Unable to allocate memory for the transmit descriptor ring\n"); 2441 "Unable to allocate memory for the transmit descriptor ring\n");
2110 return -ENOMEM; 2442 return -ENOMEM;
2111} 2443}
@@ -2229,7 +2561,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2229 **/ 2561 **/
2230int igb_setup_rx_resources(struct igb_ring *rx_ring) 2562int igb_setup_rx_resources(struct igb_ring *rx_ring)
2231{ 2563{
2232 struct pci_dev *pdev = rx_ring->pdev; 2564 struct device *dev = rx_ring->dev;
2233 int size, desc_len; 2565 int size, desc_len;
2234 2566
2235 size = sizeof(struct igb_buffer) * rx_ring->count; 2567 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2244,8 +2576,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2244 rx_ring->size = rx_ring->count * desc_len; 2576 rx_ring->size = rx_ring->count * desc_len;
2245 rx_ring->size = ALIGN(rx_ring->size, 4096); 2577 rx_ring->size = ALIGN(rx_ring->size, 4096);
2246 2578
2247 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 2579 rx_ring->desc = dma_alloc_coherent(dev,
2248 &rx_ring->dma); 2580 rx_ring->size,
2581 &rx_ring->dma,
2582 GFP_KERNEL);
2249 2583
2250 if (!rx_ring->desc) 2584 if (!rx_ring->desc)
2251 goto err; 2585 goto err;
@@ -2258,8 +2592,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2258err: 2592err:
2259 vfree(rx_ring->buffer_info); 2593 vfree(rx_ring->buffer_info);
2260 rx_ring->buffer_info = NULL; 2594 rx_ring->buffer_info = NULL;
2261 dev_err(&pdev->dev, "Unable to allocate memory for " 2595 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2262 "the receive descriptor ring\n"); 2596 " ring\n");
2263 return -ENOMEM; 2597 return -ENOMEM;
2264} 2598}
2265 2599
@@ -2635,8 +2969,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
2635 if (!tx_ring->desc) 2969 if (!tx_ring->desc)
2636 return; 2970 return;
2637 2971
2638 pci_free_consistent(tx_ring->pdev, tx_ring->size, 2972 dma_free_coherent(tx_ring->dev, tx_ring->size,
2639 tx_ring->desc, tx_ring->dma); 2973 tx_ring->desc, tx_ring->dma);
2640 2974
2641 tx_ring->desc = NULL; 2975 tx_ring->desc = NULL;
2642} 2976}
@@ -2660,15 +2994,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2660{ 2994{
2661 if (buffer_info->dma) { 2995 if (buffer_info->dma) {
2662 if (buffer_info->mapped_as_page) 2996 if (buffer_info->mapped_as_page)
2663 pci_unmap_page(tx_ring->pdev, 2997 dma_unmap_page(tx_ring->dev,
2664 buffer_info->dma, 2998 buffer_info->dma,
2665 buffer_info->length, 2999 buffer_info->length,
2666 PCI_DMA_TODEVICE); 3000 DMA_TO_DEVICE);
2667 else 3001 else
2668 pci_unmap_single(tx_ring->pdev, 3002 dma_unmap_single(tx_ring->dev,
2669 buffer_info->dma, 3003 buffer_info->dma,
2670 buffer_info->length, 3004 buffer_info->length,
2671 PCI_DMA_TODEVICE); 3005 DMA_TO_DEVICE);
2672 buffer_info->dma = 0; 3006 buffer_info->dma = 0;
2673 } 3007 }
2674 if (buffer_info->skb) { 3008 if (buffer_info->skb) {
@@ -2739,8 +3073,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2739 if (!rx_ring->desc) 3073 if (!rx_ring->desc)
2740 return; 3074 return;
2741 3075
2742 pci_free_consistent(rx_ring->pdev, rx_ring->size, 3076 dma_free_coherent(rx_ring->dev, rx_ring->size,
2743 rx_ring->desc, rx_ring->dma); 3077 rx_ring->desc, rx_ring->dma);
2744 3078
2745 rx_ring->desc = NULL; 3079 rx_ring->desc = NULL;
2746} 3080}
@@ -2776,10 +3110,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2776 for (i = 0; i < rx_ring->count; i++) { 3110 for (i = 0; i < rx_ring->count; i++) {
2777 buffer_info = &rx_ring->buffer_info[i]; 3111 buffer_info = &rx_ring->buffer_info[i];
2778 if (buffer_info->dma) { 3112 if (buffer_info->dma) {
2779 pci_unmap_single(rx_ring->pdev, 3113 dma_unmap_single(rx_ring->dev,
2780 buffer_info->dma, 3114 buffer_info->dma,
2781 rx_ring->rx_buffer_len, 3115 rx_ring->rx_buffer_len,
2782 PCI_DMA_FROMDEVICE); 3116 DMA_FROM_DEVICE);
2783 buffer_info->dma = 0; 3117 buffer_info->dma = 0;
2784 } 3118 }
2785 3119
@@ -2788,10 +3122,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2788 buffer_info->skb = NULL; 3122 buffer_info->skb = NULL;
2789 } 3123 }
2790 if (buffer_info->page_dma) { 3124 if (buffer_info->page_dma) {
2791 pci_unmap_page(rx_ring->pdev, 3125 dma_unmap_page(rx_ring->dev,
2792 buffer_info->page_dma, 3126 buffer_info->page_dma,
2793 PAGE_SIZE / 2, 3127 PAGE_SIZE / 2,
2794 PCI_DMA_FROMDEVICE); 3128 DMA_FROM_DEVICE);
2795 buffer_info->page_dma = 0; 3129 buffer_info->page_dma = 0;
2796 } 3130 }
2797 if (buffer_info->page) { 3131 if (buffer_info->page) {
@@ -3479,7 +3813,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3479 struct sk_buff *skb, u32 tx_flags) 3813 struct sk_buff *skb, u32 tx_flags)
3480{ 3814{
3481 struct e1000_adv_tx_context_desc *context_desc; 3815 struct e1000_adv_tx_context_desc *context_desc;
3482 struct pci_dev *pdev = tx_ring->pdev; 3816 struct device *dev = tx_ring->dev;
3483 struct igb_buffer *buffer_info; 3817 struct igb_buffer *buffer_info;
3484 u32 info = 0, tu_cmd = 0; 3818 u32 info = 0, tu_cmd = 0;
3485 unsigned int i; 3819 unsigned int i;
@@ -3530,7 +3864,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3530 break; 3864 break;
3531 default: 3865 default:
3532 if (unlikely(net_ratelimit())) 3866 if (unlikely(net_ratelimit()))
3533 dev_warn(&pdev->dev, 3867 dev_warn(dev,
3534 "partial checksum but proto=%x!\n", 3868 "partial checksum but proto=%x!\n",
3535 skb->protocol); 3869 skb->protocol);
3536 break; 3870 break;
@@ -3564,59 +3898,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3564 unsigned int first) 3898 unsigned int first)
3565{ 3899{
3566 struct igb_buffer *buffer_info; 3900 struct igb_buffer *buffer_info;
3567 struct pci_dev *pdev = tx_ring->pdev; 3901 struct device *dev = tx_ring->dev;
3568 unsigned int len = skb_headlen(skb); 3902 unsigned int hlen = skb_headlen(skb);
3569 unsigned int count = 0, i; 3903 unsigned int count = 0, i;
3570 unsigned int f; 3904 unsigned int f;
3905 u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
3571 3906
3572 i = tx_ring->next_to_use; 3907 i = tx_ring->next_to_use;
3573 3908
3574 buffer_info = &tx_ring->buffer_info[i]; 3909 buffer_info = &tx_ring->buffer_info[i];
3575 BUG_ON(len >= IGB_MAX_DATA_PER_TXD); 3910 BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
3576 buffer_info->length = len; 3911 buffer_info->length = hlen;
3577 /* set time_stamp *before* dma to help avoid a possible race */ 3912 /* set time_stamp *before* dma to help avoid a possible race */
3578 buffer_info->time_stamp = jiffies; 3913 buffer_info->time_stamp = jiffies;
3579 buffer_info->next_to_watch = i; 3914 buffer_info->next_to_watch = i;
3580 buffer_info->dma = pci_map_single(pdev, skb->data, len, 3915 buffer_info->dma = dma_map_single(dev, skb->data, hlen,
3581 PCI_DMA_TODEVICE); 3916 DMA_TO_DEVICE);
3582 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3917 if (dma_mapping_error(dev, buffer_info->dma))
3583 goto dma_error; 3918 goto dma_error;
3584 3919
3585 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3920 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3586 struct skb_frag_struct *frag; 3921 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
3922 unsigned int len = frag->size;
3587 3923
3588 count++; 3924 count++;
3589 i++; 3925 i++;
3590 if (i == tx_ring->count) 3926 if (i == tx_ring->count)
3591 i = 0; 3927 i = 0;
3592 3928
3593 frag = &skb_shinfo(skb)->frags[f];
3594 len = frag->size;
3595
3596 buffer_info = &tx_ring->buffer_info[i]; 3929 buffer_info = &tx_ring->buffer_info[i];
3597 BUG_ON(len >= IGB_MAX_DATA_PER_TXD); 3930 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3598 buffer_info->length = len; 3931 buffer_info->length = len;
3599 buffer_info->time_stamp = jiffies; 3932 buffer_info->time_stamp = jiffies;
3600 buffer_info->next_to_watch = i; 3933 buffer_info->next_to_watch = i;
3601 buffer_info->mapped_as_page = true; 3934 buffer_info->mapped_as_page = true;
3602 buffer_info->dma = pci_map_page(pdev, 3935 buffer_info->dma = dma_map_page(dev,
3603 frag->page, 3936 frag->page,
3604 frag->page_offset, 3937 frag->page_offset,
3605 len, 3938 len,
3606 PCI_DMA_TODEVICE); 3939 DMA_TO_DEVICE);
3607 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3940 if (dma_mapping_error(dev, buffer_info->dma))
3608 goto dma_error; 3941 goto dma_error;
3609 3942
3610 } 3943 }
3611 3944
3612 tx_ring->buffer_info[i].skb = skb; 3945 tx_ring->buffer_info[i].skb = skb;
3613 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1; 3946 tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
3947 /* multiply data chunks by size of headers */
3948 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
3949 tx_ring->buffer_info[i].gso_segs = gso_segs;
3614 tx_ring->buffer_info[first].next_to_watch = i; 3950 tx_ring->buffer_info[first].next_to_watch = i;
3615 3951
3616 return ++count; 3952 return ++count;
3617 3953
3618dma_error: 3954dma_error:
3619 dev_err(&pdev->dev, "TX DMA map failed\n"); 3955 dev_err(dev, "TX DMA map failed\n");
3620 3956
3621 /* clear timestamp and dma mappings for failed buffer_info mapping */ 3957 /* clear timestamp and dma mappings for failed buffer_info mapping */
3622 buffer_info->dma = 0; 3958 buffer_info->dma = 0;
@@ -3854,6 +4190,8 @@ static void igb_reset_task(struct work_struct *work)
3854 struct igb_adapter *adapter; 4190 struct igb_adapter *adapter;
3855 adapter = container_of(work, struct igb_adapter, reset_task); 4191 adapter = container_of(work, struct igb_adapter, reset_task);
3856 4192
4193 igb_dump(adapter);
4194 netdev_err(adapter->netdev, "Reset adapter\n");
3857 igb_reinit_locked(adapter); 4195 igb_reinit_locked(adapter);
3858} 4196}
3859 4197
@@ -4952,22 +5290,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4952/** 5290/**
4953 * igb_tx_hwtstamp - utility function which checks for TX time stamp 5291 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4954 * @q_vector: pointer to q_vector containing needed info 5292 * @q_vector: pointer to q_vector containing needed info
4955 * @skb: packet that was just sent 5293 * @buffer: pointer to igb_buffer structure
4956 * 5294 *
4957 * If we were asked to do hardware stamping and such a time stamp is 5295 * If we were asked to do hardware stamping and such a time stamp is
4958 * available, then it must have been for this skb here because we only 5296 * available, then it must have been for this skb here because we only
4959 * allow only one such packet into the queue. 5297 * allow only one such packet into the queue.
4960 */ 5298 */
4961static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) 5299static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
4962{ 5300{
4963 struct igb_adapter *adapter = q_vector->adapter; 5301 struct igb_adapter *adapter = q_vector->adapter;
4964 union skb_shared_tx *shtx = skb_tx(skb);
4965 struct e1000_hw *hw = &adapter->hw; 5302 struct e1000_hw *hw = &adapter->hw;
4966 struct skb_shared_hwtstamps shhwtstamps; 5303 struct skb_shared_hwtstamps shhwtstamps;
4967 u64 regval; 5304 u64 regval;
4968 5305
4969 /* if skb does not support hw timestamp or TX stamp not valid exit */ 5306 /* if skb does not support hw timestamp or TX stamp not valid exit */
4970 if (likely(!shtx->hardware) || 5307 if (likely(!buffer_info->shtx.hardware) ||
4971 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) 5308 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4972 return; 5309 return;
4973 5310
@@ -4975,7 +5312,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4975 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 5312 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4976 5313
4977 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); 5314 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4978 skb_tstamp_tx(skb, &shhwtstamps); 5315 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
4979} 5316}
4980 5317
4981/** 5318/**
@@ -4990,7 +5327,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4990 struct net_device *netdev = tx_ring->netdev; 5327 struct net_device *netdev = tx_ring->netdev;
4991 struct e1000_hw *hw = &adapter->hw; 5328 struct e1000_hw *hw = &adapter->hw;
4992 struct igb_buffer *buffer_info; 5329 struct igb_buffer *buffer_info;
4993 struct sk_buff *skb;
4994 union e1000_adv_tx_desc *tx_desc, *eop_desc; 5330 union e1000_adv_tx_desc *tx_desc, *eop_desc;
4995 unsigned int total_bytes = 0, total_packets = 0; 5331 unsigned int total_bytes = 0, total_packets = 0;
4996 unsigned int i, eop, count = 0; 5332 unsigned int i, eop, count = 0;
@@ -5006,19 +5342,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5006 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 5342 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
5007 buffer_info = &tx_ring->buffer_info[i]; 5343 buffer_info = &tx_ring->buffer_info[i];
5008 cleaned = (i == eop); 5344 cleaned = (i == eop);
5009 skb = buffer_info->skb;
5010 5345
5011 if (skb) { 5346 if (buffer_info->skb) {
5012 unsigned int segs, bytecount; 5347 total_bytes += buffer_info->bytecount;
5013 /* gso_segs is currently only valid for tcp */ 5348 /* gso_segs is currently only valid for tcp */
5014 segs = buffer_info->gso_segs; 5349 total_packets += buffer_info->gso_segs;
5015 /* multiply data chunks by size of headers */ 5350 igb_tx_hwtstamp(q_vector, buffer_info);
5016 bytecount = ((segs - 1) * skb_headlen(skb)) +
5017 skb->len;
5018 total_packets += segs;
5019 total_bytes += bytecount;
5020
5021 igb_tx_hwtstamp(q_vector, skb);
5022 } 5351 }
5023 5352
5024 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 5353 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5058,7 +5387,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5058 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { 5387 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
5059 5388
5060 /* detected Tx unit hang */ 5389 /* detected Tx unit hang */
5061 dev_err(&tx_ring->pdev->dev, 5390 dev_err(tx_ring->dev,
5062 "Detected Tx Unit Hang\n" 5391 "Detected Tx Unit Hang\n"
5063 " Tx Queue <%d>\n" 5392 " Tx Queue <%d>\n"
5064 " TDH <%x>\n" 5393 " TDH <%x>\n"
@@ -5137,7 +5466,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5137 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 5466 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5138 skb->ip_summed = CHECKSUM_UNNECESSARY; 5467 skb->ip_summed = CHECKSUM_UNNECESSARY;
5139 5468
5140 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); 5469 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
5141} 5470}
5142 5471
5143static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, 5472static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
@@ -5192,7 +5521,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5192{ 5521{
5193 struct igb_ring *rx_ring = q_vector->rx_ring; 5522 struct igb_ring *rx_ring = q_vector->rx_ring;
5194 struct net_device *netdev = rx_ring->netdev; 5523 struct net_device *netdev = rx_ring->netdev;
5195 struct pci_dev *pdev = rx_ring->pdev; 5524 struct device *dev = rx_ring->dev;
5196 union e1000_adv_rx_desc *rx_desc , *next_rxd; 5525 union e1000_adv_rx_desc *rx_desc , *next_rxd;
5197 struct igb_buffer *buffer_info , *next_buffer; 5526 struct igb_buffer *buffer_info , *next_buffer;
5198 struct sk_buff *skb; 5527 struct sk_buff *skb;
@@ -5232,9 +5561,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5232 cleaned_count++; 5561 cleaned_count++;
5233 5562
5234 if (buffer_info->dma) { 5563 if (buffer_info->dma) {
5235 pci_unmap_single(pdev, buffer_info->dma, 5564 dma_unmap_single(dev, buffer_info->dma,
5236 rx_ring->rx_buffer_len, 5565 rx_ring->rx_buffer_len,
5237 PCI_DMA_FROMDEVICE); 5566 DMA_FROM_DEVICE);
5238 buffer_info->dma = 0; 5567 buffer_info->dma = 0;
5239 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { 5568 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
5240 skb_put(skb, length); 5569 skb_put(skb, length);
@@ -5244,11 +5573,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5244 } 5573 }
5245 5574
5246 if (length) { 5575 if (length) {
5247 pci_unmap_page(pdev, buffer_info->page_dma, 5576 dma_unmap_page(dev, buffer_info->page_dma,
5248 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 5577 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5249 buffer_info->page_dma = 0; 5578 buffer_info->page_dma = 0;
5250 5579
5251 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, 5580 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
5252 buffer_info->page, 5581 buffer_info->page,
5253 buffer_info->page_offset, 5582 buffer_info->page_offset,
5254 length); 5583 length);
@@ -5353,12 +5682,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5353 buffer_info->page_offset ^= PAGE_SIZE / 2; 5682 buffer_info->page_offset ^= PAGE_SIZE / 2;
5354 } 5683 }
5355 buffer_info->page_dma = 5684 buffer_info->page_dma =
5356 pci_map_page(rx_ring->pdev, buffer_info->page, 5685 dma_map_page(rx_ring->dev, buffer_info->page,
5357 buffer_info->page_offset, 5686 buffer_info->page_offset,
5358 PAGE_SIZE / 2, 5687 PAGE_SIZE / 2,
5359 PCI_DMA_FROMDEVICE); 5688 DMA_FROM_DEVICE);
5360 if (pci_dma_mapping_error(rx_ring->pdev, 5689 if (dma_mapping_error(rx_ring->dev,
5361 buffer_info->page_dma)) { 5690 buffer_info->page_dma)) {
5362 buffer_info->page_dma = 0; 5691 buffer_info->page_dma = 0;
5363 rx_ring->rx_stats.alloc_failed++; 5692 rx_ring->rx_stats.alloc_failed++;
5364 goto no_buffers; 5693 goto no_buffers;
@@ -5376,12 +5705,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5376 buffer_info->skb = skb; 5705 buffer_info->skb = skb;
5377 } 5706 }
5378 if (!buffer_info->dma) { 5707 if (!buffer_info->dma) {
5379 buffer_info->dma = pci_map_single(rx_ring->pdev, 5708 buffer_info->dma = dma_map_single(rx_ring->dev,
5380 skb->data, 5709 skb->data,
5381 bufsz, 5710 bufsz,
5382 PCI_DMA_FROMDEVICE); 5711 DMA_FROM_DEVICE);
5383 if (pci_dma_mapping_error(rx_ring->pdev, 5712 if (dma_mapping_error(rx_ring->dev,
5384 buffer_info->dma)) { 5713 buffer_info->dma)) {
5385 buffer_info->dma = 0; 5714 buffer_info->dma = 0;
5386 rx_ring->rx_stats.alloc_failed++; 5715 rx_ring->rx_stats.alloc_failed++;
5387 goto no_buffers; 5716 goto no_buffers;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index cc2309027e6a..7012e3da3bdf 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -164,10 +164,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
164 buffer_info->page_offset ^= PAGE_SIZE / 2; 164 buffer_info->page_offset ^= PAGE_SIZE / 2;
165 } 165 }
166 buffer_info->page_dma = 166 buffer_info->page_dma =
167 pci_map_page(pdev, buffer_info->page, 167 dma_map_page(&pdev->dev, buffer_info->page,
168 buffer_info->page_offset, 168 buffer_info->page_offset,
169 PAGE_SIZE / 2, 169 PAGE_SIZE / 2,
170 PCI_DMA_FROMDEVICE); 170 DMA_FROM_DEVICE);
171 } 171 }
172 172
173 if (!buffer_info->skb) { 173 if (!buffer_info->skb) {
@@ -178,9 +178,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
178 } 178 }
179 179
180 buffer_info->skb = skb; 180 buffer_info->skb = skb;
181 buffer_info->dma = pci_map_single(pdev, skb->data, 181 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
182 bufsz, 182 bufsz,
183 PCI_DMA_FROMDEVICE); 183 DMA_FROM_DEVICE);
184 } 184 }
185 /* Refresh the desc even if buffer_addrs didn't change because 185 /* Refresh the desc even if buffer_addrs didn't change because
186 * each write-back erases this info. */ 186 * each write-back erases this info. */
@@ -268,28 +268,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
268 prefetch(skb->data - NET_IP_ALIGN); 268 prefetch(skb->data - NET_IP_ALIGN);
269 buffer_info->skb = NULL; 269 buffer_info->skb = NULL;
270 if (!adapter->rx_ps_hdr_size) { 270 if (!adapter->rx_ps_hdr_size) {
271 pci_unmap_single(pdev, buffer_info->dma, 271 dma_unmap_single(&pdev->dev, buffer_info->dma,
272 adapter->rx_buffer_len, 272 adapter->rx_buffer_len,
273 PCI_DMA_FROMDEVICE); 273 DMA_FROM_DEVICE);
274 buffer_info->dma = 0; 274 buffer_info->dma = 0;
275 skb_put(skb, length); 275 skb_put(skb, length);
276 goto send_up; 276 goto send_up;
277 } 277 }
278 278
279 if (!skb_shinfo(skb)->nr_frags) { 279 if (!skb_shinfo(skb)->nr_frags) {
280 pci_unmap_single(pdev, buffer_info->dma, 280 dma_unmap_single(&pdev->dev, buffer_info->dma,
281 adapter->rx_ps_hdr_size, 281 adapter->rx_ps_hdr_size,
282 PCI_DMA_FROMDEVICE); 282 DMA_FROM_DEVICE);
283 skb_put(skb, hlen); 283 skb_put(skb, hlen);
284 } 284 }
285 285
286 if (length) { 286 if (length) {
287 pci_unmap_page(pdev, buffer_info->page_dma, 287 dma_unmap_page(&pdev->dev, buffer_info->page_dma,
288 PAGE_SIZE / 2, 288 PAGE_SIZE / 2,
289 PCI_DMA_FROMDEVICE); 289 DMA_FROM_DEVICE);
290 buffer_info->page_dma = 0; 290 buffer_info->page_dma = 0;
291 291
292 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, 292 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
293 buffer_info->page, 293 buffer_info->page,
294 buffer_info->page_offset, 294 buffer_info->page_offset,
295 length); 295 length);
@@ -369,15 +369,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
369{ 369{
370 if (buffer_info->dma) { 370 if (buffer_info->dma) {
371 if (buffer_info->mapped_as_page) 371 if (buffer_info->mapped_as_page)
372 pci_unmap_page(adapter->pdev, 372 dma_unmap_page(&adapter->pdev->dev,
373 buffer_info->dma, 373 buffer_info->dma,
374 buffer_info->length, 374 buffer_info->length,
375 PCI_DMA_TODEVICE); 375 DMA_TO_DEVICE);
376 else 376 else
377 pci_unmap_single(adapter->pdev, 377 dma_unmap_single(&adapter->pdev->dev,
378 buffer_info->dma, 378 buffer_info->dma,
379 buffer_info->length, 379 buffer_info->length,
380 PCI_DMA_TODEVICE); 380 DMA_TO_DEVICE);
381 buffer_info->dma = 0; 381 buffer_info->dma = 0;
382 } 382 }
383 if (buffer_info->skb) { 383 if (buffer_info->skb) {
@@ -438,8 +438,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
439 tx_ring->size = ALIGN(tx_ring->size, 4096); 439 tx_ring->size = ALIGN(tx_ring->size, 4096);
440 440
441 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 441 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
442 &tx_ring->dma); 442 &tx_ring->dma, GFP_KERNEL);
443 443
444 if (!tx_ring->desc) 444 if (!tx_ring->desc)
445 goto err; 445 goto err;
@@ -480,8 +480,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
480 rx_ring->size = rx_ring->count * desc_len; 480 rx_ring->size = rx_ring->count * desc_len;
481 rx_ring->size = ALIGN(rx_ring->size, 4096); 481 rx_ring->size = ALIGN(rx_ring->size, 4096);
482 482
483 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 483 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
484 &rx_ring->dma); 484 &rx_ring->dma, GFP_KERNEL);
485 485
486 if (!rx_ring->desc) 486 if (!rx_ring->desc)
487 goto err; 487 goto err;
@@ -549,7 +549,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
549 vfree(tx_ring->buffer_info); 549 vfree(tx_ring->buffer_info);
550 tx_ring->buffer_info = NULL; 550 tx_ring->buffer_info = NULL;
551 551
552 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 552 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
553 tx_ring->dma);
553 554
554 tx_ring->desc = NULL; 555 tx_ring->desc = NULL;
555} 556}
@@ -574,13 +575,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
574 buffer_info = &rx_ring->buffer_info[i]; 575 buffer_info = &rx_ring->buffer_info[i];
575 if (buffer_info->dma) { 576 if (buffer_info->dma) {
576 if (adapter->rx_ps_hdr_size){ 577 if (adapter->rx_ps_hdr_size){
577 pci_unmap_single(pdev, buffer_info->dma, 578 dma_unmap_single(&pdev->dev, buffer_info->dma,
578 adapter->rx_ps_hdr_size, 579 adapter->rx_ps_hdr_size,
579 PCI_DMA_FROMDEVICE); 580 DMA_FROM_DEVICE);
580 } else { 581 } else {
581 pci_unmap_single(pdev, buffer_info->dma, 582 dma_unmap_single(&pdev->dev, buffer_info->dma,
582 adapter->rx_buffer_len, 583 adapter->rx_buffer_len,
583 PCI_DMA_FROMDEVICE); 584 DMA_FROM_DEVICE);
584 } 585 }
585 buffer_info->dma = 0; 586 buffer_info->dma = 0;
586 } 587 }
@@ -592,9 +593,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
592 593
593 if (buffer_info->page) { 594 if (buffer_info->page) {
594 if (buffer_info->page_dma) 595 if (buffer_info->page_dma)
595 pci_unmap_page(pdev, buffer_info->page_dma, 596 dma_unmap_page(&pdev->dev,
597 buffer_info->page_dma,
596 PAGE_SIZE / 2, 598 PAGE_SIZE / 2,
597 PCI_DMA_FROMDEVICE); 599 DMA_FROM_DEVICE);
598 put_page(buffer_info->page); 600 put_page(buffer_info->page);
599 buffer_info->page = NULL; 601 buffer_info->page = NULL;
600 buffer_info->page_dma = 0; 602 buffer_info->page_dma = 0;
@@ -2104,9 +2106,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2104 buffer_info->time_stamp = jiffies; 2106 buffer_info->time_stamp = jiffies;
2105 buffer_info->next_to_watch = i; 2107 buffer_info->next_to_watch = i;
2106 buffer_info->mapped_as_page = false; 2108 buffer_info->mapped_as_page = false;
2107 buffer_info->dma = pci_map_single(pdev, skb->data, len, 2109 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2108 PCI_DMA_TODEVICE); 2110 DMA_TO_DEVICE);
2109 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2111 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2110 goto dma_error; 2112 goto dma_error;
2111 2113
2112 2114
@@ -2127,12 +2129,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2127 buffer_info->time_stamp = jiffies; 2129 buffer_info->time_stamp = jiffies;
2128 buffer_info->next_to_watch = i; 2130 buffer_info->next_to_watch = i;
2129 buffer_info->mapped_as_page = true; 2131 buffer_info->mapped_as_page = true;
2130 buffer_info->dma = pci_map_page(pdev, 2132 buffer_info->dma = dma_map_page(&pdev->dev,
2131 frag->page, 2133 frag->page,
2132 frag->page_offset, 2134 frag->page_offset,
2133 len, 2135 len,
2134 PCI_DMA_TODEVICE); 2136 DMA_TO_DEVICE);
2135 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2137 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2136 goto dma_error; 2138 goto dma_error;
2137 } 2139 }
2138 2140
@@ -2644,16 +2646,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2644 return err; 2646 return err;
2645 2647
2646 pci_using_dac = 0; 2648 pci_using_dac = 0;
2647 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2649 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2648 if (!err) { 2650 if (!err) {
2649 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2651 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2650 if (!err) 2652 if (!err)
2651 pci_using_dac = 1; 2653 pci_using_dac = 1;
2652 } else { 2654 } else {
2653 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2655 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2654 if (err) { 2656 if (err) {
2655 err = pci_set_consistent_dma_mask(pdev, 2657 err = dma_set_coherent_mask(&pdev->dev,
2656 DMA_BIT_MASK(32)); 2658 DMA_BIT_MASK(32));
2657 if (err) { 2659 if (err) {
2658 dev_err(&pdev->dev, "No usable DMA " 2660 dev_err(&pdev->dev, "No usable DMA "
2659 "configuration, aborting\n"); 2661 "configuration, aborting\n");
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71d0c8b..521c0c732998 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
78#define PFX "ixgb: " 78#define PFX "ixgb: "
79 79
80#ifdef _DEBUG_DRIVER_ 80#ifdef _DEBUG_DRIVER_
81#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args) 81#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
82#else 82#else
83#define IXGB_DBG(args...) 83#define IXGB_DBG(fmt, args...) \
84do { \
85 if (0) \
86 printk(KERN_DEBUG PFX fmt, ##args); \
87} while (0)
84#endif 88#endif
85 89
86/* TX/RX descriptor defines */ 90/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa7264a12..06303a36aaf7 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb_hw.h" 31#include "ixgb_hw.h"
30#include "ixgb_ee.h" 32#include "ixgb_ee.h"
31/* Local prototypes */ 33/* Local prototypes */
@@ -467,11 +469,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
467 u16 checksum = 0; 469 u16 checksum = 0;
468 struct ixgb_ee_map_type *ee_map; 470 struct ixgb_ee_map_type *ee_map;
469 471
470 DEBUGFUNC("ixgb_get_eeprom_data"); 472 ENTER();
471 473
472 ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 474 ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
473 475
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n"); 476 pr_debug("Reading eeprom data\n");
475 for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { 477 for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 u16 ee_data; 478 u16 ee_data;
477 ee_data = ixgb_read_eeprom(hw, i); 479 ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +482,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
480 } 482 }
481 483
482 if (checksum != (u16) EEPROM_SUM) { 484 if (checksum != (u16) EEPROM_SUM) {
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 485 pr_debug("Checksum invalid\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is 486 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 487 * invalidated */
486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 488 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +491,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
489 491
490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 492 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 493 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
492 DEBUGOUT("ixgb_ee: Signature invalid.\n"); 494 pr_debug("Signature invalid\n");
493 return(false); 495 return(false);
494 } 496 }
495 497
@@ -555,13 +557,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
555 int i; 557 int i;
556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 558 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
557 559
558 DEBUGFUNC("ixgb_get_ee_mac_addr"); 560 ENTER();
559 561
560 if (ixgb_check_and_get_eeprom_data(hw) == true) { 562 if (ixgb_check_and_get_eeprom_data(hw) == true) {
561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { 563 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
562 mac_addr[i] = ee_map->mac_addr[i]; 564 mac_addr[i] = ee_map->mac_addr[i];
563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
564 } 565 }
566 pr_debug("eeprom mac address = %pM\n", mac_addr);
565 } 567 }
566} 568}
567 569
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84e6802..cd247b8d2b73 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
30 * Shared functions for accessing and configuring the adapter 30 * Shared functions for accessing and configuring the adapter
31 */ 31 */
32 32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
33#include "ixgb_hw.h" 35#include "ixgb_hw.h"
34#include "ixgb_ids.h" 36#include "ixgb_ids.h"
35 37
38#include <linux/etherdevice.h>
39
36/* Local function prototypes */ 40/* Local function prototypes */
37 41
38static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); 42static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
120 u32 ctrl_reg; 124 u32 ctrl_reg;
121 u32 icr_reg; 125 u32 icr_reg;
122 126
123 DEBUGFUNC("ixgb_adapter_stop"); 127 ENTER();
124 128
125 /* If we are stopped or resetting exit gracefully and wait to be 129 /* If we are stopped or resetting exit gracefully and wait to be
126 * started again before accessing the hardware. 130 * started again before accessing the hardware.
127 */ 131 */
128 if (hw->adapter_stopped) { 132 if (hw->adapter_stopped) {
129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); 133 pr_debug("Exiting because the adapter is already stopped!!!\n");
130 return false; 134 return false;
131 } 135 }
132 136
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
136 hw->adapter_stopped = true; 140 hw->adapter_stopped = true;
137 141
138 /* Clear interrupt mask to stop board from generating interrupts */ 142 /* Clear interrupt mask to stop board from generating interrupts */
139 DEBUGOUT("Masking off all interrupts\n"); 143 pr_debug("Masking off all interrupts\n");
140 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF); 144 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
141 145
142 /* Disable the Transmit and Receive units. Then delay to allow 146 /* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
152 * the current PCI configuration. The global reset bit is self- 156 * the current PCI configuration. The global reset bit is self-
153 * clearing, and should clear within a microsecond. 157 * clearing, and should clear within a microsecond.
154 */ 158 */
155 DEBUGOUT("Issuing a global reset to MAC\n"); 159 pr_debug("Issuing a global reset to MAC\n");
156 160
157 ctrl_reg = ixgb_mac_reset(hw); 161 ctrl_reg = ixgb_mac_reset(hw);
158 162
159 /* Clear interrupt mask to stop board from generating interrupts */ 163 /* Clear interrupt mask to stop board from generating interrupts */
160 DEBUGOUT("Masking off all interrupts\n"); 164 pr_debug("Masking off all interrupts\n");
161 IXGB_WRITE_REG(hw, IMC, 0xffffffff); 165 IXGB_WRITE_REG(hw, IMC, 0xffffffff);
162 166
163 /* Clear any pending interrupt events. */ 167 /* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
183 u16 vendor_name[5]; 187 u16 vendor_name[5];
184 ixgb_xpak_vendor xpak_vendor; 188 ixgb_xpak_vendor xpak_vendor;
185 189
186 DEBUGFUNC("ixgb_identify_xpak_vendor"); 190 ENTER();
187 191
188 /* Read the first few bytes of the vendor string from the XPAK NVR 192 /* Read the first few bytes of the vendor string from the XPAK NVR
189 * registers. These are standard XENPAK/XPAK registers, so all XPAK 193 * registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
222 ixgb_phy_type phy_type; 226 ixgb_phy_type phy_type;
223 ixgb_xpak_vendor xpak_vendor; 227 ixgb_xpak_vendor xpak_vendor;
224 228
225 DEBUGFUNC("ixgb_identify_phy"); 229 ENTER();
226 230
227 /* Infer the transceiver/phy type from the device id */ 231 /* Infer the transceiver/phy type from the device id */
228 switch (hw->device_id) { 232 switch (hw->device_id) {
229 case IXGB_DEVICE_ID_82597EX: 233 case IXGB_DEVICE_ID_82597EX:
230 DEBUGOUT("Identified TXN17401 optics\n"); 234 pr_debug("Identified TXN17401 optics\n");
231 phy_type = ixgb_phy_type_txn17401; 235 phy_type = ixgb_phy_type_txn17401;
232 break; 236 break;
233 237
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
237 * type of optics. */ 241 * type of optics. */
238 xpak_vendor = ixgb_identify_xpak_vendor(hw); 242 xpak_vendor = ixgb_identify_xpak_vendor(hw);
239 if (xpak_vendor == ixgb_xpak_vendor_intel) { 243 if (xpak_vendor == ixgb_xpak_vendor_intel) {
240 DEBUGOUT("Identified TXN17201 optics\n"); 244 pr_debug("Identified TXN17201 optics\n");
241 phy_type = ixgb_phy_type_txn17201; 245 phy_type = ixgb_phy_type_txn17201;
242 } else { 246 } else {
243 DEBUGOUT("Identified G6005 optics\n"); 247 pr_debug("Identified G6005 optics\n");
244 phy_type = ixgb_phy_type_g6005; 248 phy_type = ixgb_phy_type_g6005;
245 } 249 }
246 break; 250 break;
247 case IXGB_DEVICE_ID_82597EX_LR: 251 case IXGB_DEVICE_ID_82597EX_LR:
248 DEBUGOUT("Identified G6104 optics\n"); 252 pr_debug("Identified G6104 optics\n");
249 phy_type = ixgb_phy_type_g6104; 253 phy_type = ixgb_phy_type_g6104;
250 break; 254 break;
251 case IXGB_DEVICE_ID_82597EX_CX4: 255 case IXGB_DEVICE_ID_82597EX_CX4:
252 DEBUGOUT("Identified CX4\n"); 256 pr_debug("Identified CX4\n");
253 xpak_vendor = ixgb_identify_xpak_vendor(hw); 257 xpak_vendor = ixgb_identify_xpak_vendor(hw);
254 if (xpak_vendor == ixgb_xpak_vendor_intel) { 258 if (xpak_vendor == ixgb_xpak_vendor_intel) {
255 DEBUGOUT("Identified TXN17201 optics\n"); 259 pr_debug("Identified TXN17201 optics\n");
256 phy_type = ixgb_phy_type_txn17201; 260 phy_type = ixgb_phy_type_txn17201;
257 } else { 261 } else {
258 DEBUGOUT("Identified G6005 optics\n"); 262 pr_debug("Identified G6005 optics\n");
259 phy_type = ixgb_phy_type_g6005; 263 phy_type = ixgb_phy_type_g6005;
260 } 264 }
261 break; 265 break;
262 default: 266 default:
263 DEBUGOUT("Unknown physical layer module\n"); 267 pr_debug("Unknown physical layer module\n");
264 phy_type = ixgb_phy_type_unknown; 268 phy_type = ixgb_phy_type_unknown;
265 break; 269 break;
266 } 270 }
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
296 u32 ctrl_reg; 300 u32 ctrl_reg;
297 bool status; 301 bool status;
298 302
299 DEBUGFUNC("ixgb_init_hw"); 303 ENTER();
300 304
301 /* Issue a global reset to the MAC. This will reset the chip's 305 /* Issue a global reset to the MAC. This will reset the chip's
302 * transmit, receive, DMA, and link units. It will not effect 306 * transmit, receive, DMA, and link units. It will not effect
303 * the current PCI configuration. The global reset bit is self- 307 * the current PCI configuration. The global reset bit is self-
304 * clearing, and should clear within a microsecond. 308 * clearing, and should clear within a microsecond.
305 */ 309 */
306 DEBUGOUT("Issuing a global reset to MAC\n"); 310 pr_debug("Issuing a global reset to MAC\n");
307 311
308 ctrl_reg = ixgb_mac_reset(hw); 312 ctrl_reg = ixgb_mac_reset(hw);
309 313
310 DEBUGOUT("Issuing an EE reset to MAC\n"); 314 pr_debug("Issuing an EE reset to MAC\n");
311#ifdef HP_ZX1 315#ifdef HP_ZX1
312 /* Workaround for 82597EX reset errata */ 316 /* Workaround for 82597EX reset errata */
313 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST); 317 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
335 * If it is not valid, we fail hardware init. 339 * If it is not valid, we fail hardware init.
336 */ 340 */
337 if (!mac_addr_valid(hw->curr_mac_addr)) { 341 if (!mac_addr_valid(hw->curr_mac_addr)) {
338 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n"); 342 pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
339 return(false); 343 return(false);
340 } 344 }
341 345
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
346 ixgb_get_bus_info(hw); 350 ixgb_get_bus_info(hw);
347 351
348 /* Zero out the Multicast HASH table */ 352 /* Zero out the Multicast HASH table */
349 DEBUGOUT("Zeroing the MTA\n"); 353 pr_debug("Zeroing the MTA\n");
350 for (i = 0; i < IXGB_MC_TBL_SIZE; i++) 354 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
351 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 355 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
352 356
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
379{ 383{
380 u32 i; 384 u32 i;
381 385
382 DEBUGFUNC("ixgb_init_rx_addrs"); 386 ENTER();
383 387
384 /* 388 /*
385 * If the current mac address is valid, assume it is a software override 389 * If the current mac address is valid, assume it is a software override
@@ -391,28 +395,19 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
391 /* Get the MAC address from the eeprom for later reference */ 395 /* Get the MAC address from the eeprom for later reference */
392 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); 396 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
393 397
394 DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ", 398 pr_debug("Keeping Permanent MAC Addr = %pM\n",
395 hw->curr_mac_addr[0], 399 hw->curr_mac_addr);
396 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
397 DEBUGOUT3("%.2X %.2X %.2X\n",
398 hw->curr_mac_addr[3],
399 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
400 } else { 400 } else {
401 401
402 /* Setup the receive address. */ 402 /* Setup the receive address. */
403 DEBUGOUT("Overriding MAC Address in RAR[0]\n"); 403 pr_debug("Overriding MAC Address in RAR[0]\n");
404 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", 404 pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
405 hw->curr_mac_addr[0],
406 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
407 DEBUGOUT3("%.2X %.2X %.2X\n",
408 hw->curr_mac_addr[3],
409 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
410 405
411 ixgb_rar_set(hw, hw->curr_mac_addr, 0); 406 ixgb_rar_set(hw, hw->curr_mac_addr, 0);
412 } 407 }
413 408
414 /* Zero out the other 15 receive addresses. */ 409 /* Zero out the other 15 receive addresses. */
415 DEBUGOUT("Clearing RAR[1-15]\n"); 410 pr_debug("Clearing RAR[1-15]\n");
416 for (i = 1; i < IXGB_RAR_ENTRIES; i++) { 411 for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
417 /* Write high reg first to disable the AV bit first */ 412 /* Write high reg first to disable the AV bit first */
418 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 413 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -444,64 +439,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
444 u32 hash_value; 439 u32 hash_value;
445 u32 i; 440 u32 i;
446 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ 441 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
442 u8 *mca;
447 443
448 DEBUGFUNC("ixgb_mc_addr_list_update"); 444 ENTER();
449 445
450 /* Set the new number of MC addresses that we are being requested to use. */ 446 /* Set the new number of MC addresses that we are being requested to use. */
451 hw->num_mc_addrs = mc_addr_count; 447 hw->num_mc_addrs = mc_addr_count;
452 448
453 /* Clear RAR[1-15] */ 449 /* Clear RAR[1-15] */
454 DEBUGOUT(" Clearing RAR[1-15]\n"); 450 pr_debug("Clearing RAR[1-15]\n");
455 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { 451 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
456 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 452 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
457 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 453 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
458 } 454 }
459 455
460 /* Clear the MTA */ 456 /* Clear the MTA */
461 DEBUGOUT(" Clearing MTA\n"); 457 pr_debug("Clearing MTA\n");
462 for (i = 0; i < IXGB_MC_TBL_SIZE; i++) 458 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
463 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 459 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
464 460
465 /* Add the new addresses */ 461 /* Add the new addresses */
462 mca = mc_addr_list;
466 for (i = 0; i < mc_addr_count; i++) { 463 for (i = 0; i < mc_addr_count; i++) {
467 DEBUGOUT(" Adding the multicast addresses:\n"); 464 pr_debug("Adding the multicast addresses:\n");
468 DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, 465 pr_debug("MC Addr #%d = %pM\n", i, mca);
469 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
470 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
471 1],
472 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
473 2],
474 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
475 3],
476 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
477 4],
478 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
479 5]);
480 466
481 /* Place this multicast address in the RAR if there is room, * 467 /* Place this multicast address in the RAR if there is room, *
482 * else put it in the MTA 468 * else put it in the MTA
483 */ 469 */
484 if (rar_used_count < IXGB_RAR_ENTRIES) { 470 if (rar_used_count < IXGB_RAR_ENTRIES) {
485 ixgb_rar_set(hw, 471 ixgb_rar_set(hw, mca, rar_used_count);
486 mc_addr_list + 472 pr_debug("Added a multicast address to RAR[%d]\n", i);
487 (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
488 rar_used_count);
489 DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
490 rar_used_count++; 473 rar_used_count++;
491 } else { 474 } else {
492 hash_value = ixgb_hash_mc_addr(hw, 475 hash_value = ixgb_hash_mc_addr(hw, mca);
493 mc_addr_list +
494 (i *
495 (IXGB_ETH_LENGTH_OF_ADDRESS
496 + pad)));
497 476
498 DEBUGOUT1(" Hash value = 0x%03X\n", hash_value); 477 pr_debug("Hash value = 0x%03X\n", hash_value);
499 478
500 ixgb_mta_set(hw, hash_value); 479 ixgb_mta_set(hw, hash_value);
501 } 480 }
481
482 mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
502 } 483 }
503 484
504 DEBUGOUT("MC Update Complete\n"); 485 pr_debug("MC Update Complete\n");
505 return; 486 return;
506} 487}
507 488
@@ -520,7 +501,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
520{ 501{
521 u32 hash_value = 0; 502 u32 hash_value = 0;
522 503
523 DEBUGFUNC("ixgb_hash_mc_addr"); 504 ENTER();
524 505
525 /* The portion of the address that is used for the hash table is 506 /* The portion of the address that is used for the hash table is
526 * determined by the mc_filter_type setting. 507 * determined by the mc_filter_type setting.
@@ -547,7 +528,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
547 break; 528 break;
548 default: 529 default:
549 /* Invalid mc_filter_type, what should we do? */ 530 /* Invalid mc_filter_type, what should we do? */
550 DEBUGOUT("MC filter type param set incorrectly\n"); 531 pr_debug("MC filter type param set incorrectly\n");
551 ASSERT(0); 532 ASSERT(0);
552 break; 533 break;
553 } 534 }
@@ -603,7 +584,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
603{ 584{
604 u32 rar_low, rar_high; 585 u32 rar_low, rar_high;
605 586
606 DEBUGFUNC("ixgb_rar_set"); 587 ENTER();
607 588
608 /* HW expects these in little endian so we reverse the byte order 589 /* HW expects these in little endian so we reverse the byte order
609 * from network order (big endian) to little endian 590 * from network order (big endian) to little endian
@@ -666,7 +647,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
666 u32 pap_reg = 0; /* by default, assume no pause time */ 647 u32 pap_reg = 0; /* by default, assume no pause time */
667 bool status = true; 648 bool status = true;
668 649
669 DEBUGFUNC("ixgb_setup_fc"); 650 ENTER();
670 651
671 /* Get the current control reg 0 settings */ 652 /* Get the current control reg 0 settings */
672 ctrl_reg = IXGB_READ_REG(hw, CTRL0); 653 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +691,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
710 break; 691 break;
711 default: 692 default:
712 /* We should never get here. The value should be 0-3. */ 693 /* We should never get here. The value should be 0-3. */
713 DEBUGOUT("Flow control param set incorrectly\n"); 694 pr_debug("Flow control param set incorrectly\n");
714 ASSERT(0); 695 ASSERT(0);
715 break; 696 break;
716 } 697 }
@@ -940,7 +921,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
940 u32 status_reg; 921 u32 status_reg;
941 u32 xpcss_reg; 922 u32 xpcss_reg;
942 923
943 DEBUGFUNC("ixgb_check_for_link"); 924 ENTER();
944 925
945 xpcss_reg = IXGB_READ_REG(hw, XPCSS); 926 xpcss_reg = IXGB_READ_REG(hw, XPCSS);
946 status_reg = IXGB_READ_REG(hw, STATUS); 927 status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +931,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
950 hw->link_up = true; 931 hw->link_up = true;
951 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 932 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
952 (status_reg & IXGB_STATUS_LU)) { 933 (status_reg & IXGB_STATUS_LU)) {
953 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n"); 934 pr_debug("XPCSS Not Aligned while Status:LU is set\n");
954 hw->link_up = ixgb_link_reset(hw); 935 hw->link_up = ixgb_link_reset(hw);
955 } else { 936 } else {
956 /* 937 /*
@@ -981,8 +962,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
981 newRFC = IXGB_READ_REG(hw, RFC); 962 newRFC = IXGB_READ_REG(hw, RFC);
982 if ((hw->lastLFC + 250 < newLFC) 963 if ((hw->lastLFC + 250 < newLFC)
983 || (hw->lastRFC + 250 < newRFC)) { 964 || (hw->lastRFC + 250 < newRFC)) {
984 DEBUGOUT 965 pr_debug("BAD LINK! too many LFC/RFC since last check\n");
985 ("BAD LINK! too many LFC/RFC since last check\n");
986 bad_link_returncode = true; 966 bad_link_returncode = true;
987 } 967 }
988 hw->lastLFC = newLFC; 968 hw->lastLFC = newLFC;
@@ -1002,11 +982,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1002{ 982{
1003 volatile u32 temp_reg; 983 volatile u32 temp_reg;
1004 984
1005 DEBUGFUNC("ixgb_clear_hw_cntrs"); 985 ENTER();
1006 986
1007 /* if we are stopped or resetting exit gracefully */ 987 /* if we are stopped or resetting exit gracefully */
1008 if (hw->adapter_stopped) { 988 if (hw->adapter_stopped) {
1009 DEBUGOUT("Exiting because the adapter is stopped!!!\n"); 989 pr_debug("Exiting because the adapter is stopped!!!\n");
1010 return; 990 return;
1011 } 991 }
1012 992
@@ -1156,26 +1136,21 @@ static bool
1156mac_addr_valid(u8 *mac_addr) 1136mac_addr_valid(u8 *mac_addr)
1157{ 1137{
1158 bool is_valid = true; 1138 bool is_valid = true;
1159 DEBUGFUNC("mac_addr_valid"); 1139 ENTER();
1160 1140
1161 /* Make sure it is not a multicast address */ 1141 /* Make sure it is not a multicast address */
1162 if (IS_MULTICAST(mac_addr)) { 1142 if (is_multicast_ether_addr(mac_addr)) {
1163 DEBUGOUT("MAC address is multicast\n"); 1143 pr_debug("MAC address is multicast\n");
1164 is_valid = false; 1144 is_valid = false;
1165 } 1145 }
1166 /* Not a broadcast address */ 1146 /* Not a broadcast address */
1167 else if (IS_BROADCAST(mac_addr)) { 1147 else if (is_broadcast_ether_addr(mac_addr)) {
1168 DEBUGOUT("MAC address is broadcast\n"); 1148 pr_debug("MAC address is broadcast\n");
1169 is_valid = false; 1149 is_valid = false;
1170 } 1150 }
1171 /* Reject the zero address */ 1151 /* Reject the zero address */
1172 else if (mac_addr[0] == 0 && 1152 else if (is_zero_ether_addr(mac_addr)) {
1173 mac_addr[1] == 0 && 1153 pr_debug("MAC address is all zeros\n");
1174 mac_addr[2] == 0 &&
1175 mac_addr[3] == 0 &&
1176 mac_addr[4] == 0 &&
1177 mac_addr[5] == 0) {
1178 DEBUGOUT("MAC address is all zeros\n");
1179 is_valid = false; 1154 is_valid = false;
1180 } 1155 }
1181 return (is_valid); 1156 return (is_valid);
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3aab5ad..873d32b89fba 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
636 u8 filler3[0xAAAA]; 636 u8 filler3[0xAAAA];
637}; 637};
638 638
639/*
640 * This is a little-endian specific check.
641 */
642#define IS_MULTICAST(Address) \
643 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
644
645/*
646 * Check whether an address is broadcast.
647 */
648#define IS_BROADCAST(Address) \
649 ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
650
651/* Flow control parameters */ 639/* Flow control parameters */
652struct ixgb_fc { 640struct ixgb_fc {
653 u32 high_water; /* Flow Control High-water */ 641 u32 high_water; /* Flow Control High-water */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 912dd1d5772c..d58ca6b578cc 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb.h" 31#include "ixgb.h"
30 32
31char ixgb_driver_name[] = "ixgb"; 33char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
146static int __init 148static int __init
147ixgb_init_module(void) 149ixgb_init_module(void)
148{ 150{
149 printk(KERN_INFO "%s - version %s\n", 151 pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
150 ixgb_driver_string, ixgb_driver_version); 152 pr_info("%s\n", ixgb_copyright);
151
152 printk(KERN_INFO "%s\n", ixgb_copyright);
153 153
154 return pci_register_driver(&ixgb_driver); 154 return pci_register_driver(&ixgb_driver);
155} 155}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
368 if (err) 368 if (err)
369 return err; 369 return err;
370 370
371 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && 371 pci_using_dac = 0;
372 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { 372 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
373 pci_using_dac = 1; 373 if (!err) {
374 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
375 if (!err)
376 pci_using_dac = 1;
374 } else { 377 } else {
375 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) || 378 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
376 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) { 379 if (err) {
377 printk(KERN_ERR 380 err = dma_set_coherent_mask(&pdev->dev,
378 "ixgb: No usable DMA configuration, aborting\n"); 381 DMA_BIT_MASK(32));
379 goto err_dma_mask; 382 if (err) {
383 pr_err("No usable DMA configuration, aborting\n");
384 goto err_dma_mask;
385 }
380 } 386 }
381 pci_using_dac = 0;
382 } 387 }
383 388
384 err = pci_request_regions(pdev, ixgb_driver_name); 389 err = pci_request_regions(pdev, ixgb_driver_name);
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
674 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 679 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
675 txdr->size = ALIGN(txdr->size, 4096); 680 txdr->size = ALIGN(txdr->size, 4096);
676 681
677 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 682 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
683 GFP_KERNEL);
678 if (!txdr->desc) { 684 if (!txdr->desc) {
679 vfree(txdr->buffer_info); 685 vfree(txdr->buffer_info);
680 netif_err(adapter, probe, adapter->netdev, 686 netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 769 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
764 rxdr->size = ALIGN(rxdr->size, 4096); 770 rxdr->size = ALIGN(rxdr->size, 4096);
765 771
766 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 772 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
773 GFP_KERNEL);
767 774
768 if (!rxdr->desc) { 775 if (!rxdr->desc) {
769 vfree(rxdr->buffer_info); 776 vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
884 vfree(adapter->tx_ring.buffer_info); 891 vfree(adapter->tx_ring.buffer_info);
885 adapter->tx_ring.buffer_info = NULL; 892 adapter->tx_ring.buffer_info = NULL;
886 893
887 pci_free_consistent(pdev, adapter->tx_ring.size, 894 dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
888 adapter->tx_ring.desc, adapter->tx_ring.dma); 895 adapter->tx_ring.desc, adapter->tx_ring.dma);
889 896
890 adapter->tx_ring.desc = NULL; 897 adapter->tx_ring.desc = NULL;
891} 898}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
896{ 903{
897 if (buffer_info->dma) { 904 if (buffer_info->dma) {
898 if (buffer_info->mapped_as_page) 905 if (buffer_info->mapped_as_page)
899 pci_unmap_page(adapter->pdev, buffer_info->dma, 906 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
900 buffer_info->length, PCI_DMA_TODEVICE); 907 buffer_info->length, DMA_TO_DEVICE);
901 else 908 else
902 pci_unmap_single(adapter->pdev, buffer_info->dma, 909 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
903 buffer_info->length, 910 buffer_info->length, DMA_TO_DEVICE);
904 PCI_DMA_TODEVICE);
905 buffer_info->dma = 0; 911 buffer_info->dma = 0;
906 } 912 }
907 913
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
967 vfree(rx_ring->buffer_info); 973 vfree(rx_ring->buffer_info);
968 rx_ring->buffer_info = NULL; 974 rx_ring->buffer_info = NULL;
969 975
970 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 976 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
977 rx_ring->dma);
971 978
972 rx_ring->desc = NULL; 979 rx_ring->desc = NULL;
973} 980}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
991 for (i = 0; i < rx_ring->count; i++) { 998 for (i = 0; i < rx_ring->count; i++) {
992 buffer_info = &rx_ring->buffer_info[i]; 999 buffer_info = &rx_ring->buffer_info[i];
993 if (buffer_info->dma) { 1000 if (buffer_info->dma) {
994 pci_unmap_single(pdev, 1001 dma_unmap_single(&pdev->dev,
995 buffer_info->dma, 1002 buffer_info->dma,
996 buffer_info->length, 1003 buffer_info->length,
997 PCI_DMA_FROMDEVICE); 1004 DMA_FROM_DEVICE);
998 buffer_info->dma = 0; 1005 buffer_info->dma = 0;
999 buffer_info->length = 0; 1006 buffer_info->length = 0;
1000 } 1007 }
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
1118 1125
1119 if (adapter->hw.link_up) { 1126 if (adapter->hw.link_up) {
1120 if (!netif_carrier_ok(netdev)) { 1127 if (!netif_carrier_ok(netdev)) {
1121 printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps " 1128 netdev_info(netdev,
1122 "Full Duplex, Flow Control: %s\n", 1129 "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1123 netdev->name, 1130 (adapter->hw.fc.type == ixgb_fc_full) ?
1124 (adapter->hw.fc.type == ixgb_fc_full) ? 1131 "RX/TX" :
1125 "RX/TX" : 1132 (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1126 ((adapter->hw.fc.type == ixgb_fc_rx_pause) ? 1133 "RX" :
1127 "RX" : 1134 (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1128 ((adapter->hw.fc.type == ixgb_fc_tx_pause) ? 1135 "TX" : "None");
1129 "TX" : "None")));
1130 adapter->link_speed = 10000; 1136 adapter->link_speed = 10000;
1131 adapter->link_duplex = FULL_DUPLEX; 1137 adapter->link_duplex = FULL_DUPLEX;
1132 netif_carrier_on(netdev); 1138 netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
1135 if (netif_carrier_ok(netdev)) { 1141 if (netif_carrier_ok(netdev)) {
1136 adapter->link_speed = 0; 1142 adapter->link_speed = 0;
1137 adapter->link_duplex = 0; 1143 adapter->link_duplex = 0;
1138 printk(KERN_INFO "ixgb: %s NIC Link is Down\n", 1144 netdev_info(netdev, "NIC Link is Down\n");
1139 netdev->name);
1140 netif_carrier_off(netdev); 1145 netif_carrier_off(netdev);
1141 } 1146 }
1142 } 1147 }
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1303 WARN_ON(buffer_info->dma != 0); 1308 WARN_ON(buffer_info->dma != 0);
1304 buffer_info->time_stamp = jiffies; 1309 buffer_info->time_stamp = jiffies;
1305 buffer_info->mapped_as_page = false; 1310 buffer_info->mapped_as_page = false;
1306 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 1311 buffer_info->dma = dma_map_single(&pdev->dev,
1307 size, PCI_DMA_TODEVICE); 1312 skb->data + offset,
1308 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 1313 size, DMA_TO_DEVICE);
1314 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1309 goto dma_error; 1315 goto dma_error;
1310 buffer_info->next_to_watch = 0; 1316 buffer_info->next_to_watch = 0;
1311 1317
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1344 buffer_info->time_stamp = jiffies; 1350 buffer_info->time_stamp = jiffies;
1345 buffer_info->mapped_as_page = true; 1351 buffer_info->mapped_as_page = true;
1346 buffer_info->dma = 1352 buffer_info->dma =
1347 pci_map_page(pdev, frag->page, 1353 dma_map_page(&pdev->dev, frag->page,
1348 offset, size, 1354 offset, size, DMA_TO_DEVICE);
1349 PCI_DMA_TODEVICE); 1355 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1350 if (pci_dma_mapping_error(pdev, buffer_info->dma))
1351 goto dma_error; 1356 goto dma_error;
1352 buffer_info->next_to_watch = 0; 1357 buffer_info->next_to_watch = 0;
1353 1358
@@ -1965,10 +1970,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1965 cleaned = true; 1970 cleaned = true;
1966 cleaned_count++; 1971 cleaned_count++;
1967 1972
1968 pci_unmap_single(pdev, 1973 dma_unmap_single(&pdev->dev,
1969 buffer_info->dma, 1974 buffer_info->dma,
1970 buffer_info->length, 1975 buffer_info->length,
1971 PCI_DMA_FROMDEVICE); 1976 DMA_FROM_DEVICE);
1972 buffer_info->dma = 0; 1977 buffer_info->dma = 0;
1973 1978
1974 length = le16_to_cpu(rx_desc->length); 1979 length = le16_to_cpu(rx_desc->length);
@@ -2091,10 +2096,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2091 buffer_info->skb = skb; 2096 buffer_info->skb = skb;
2092 buffer_info->length = adapter->rx_buffer_len; 2097 buffer_info->length = adapter->rx_buffer_len;
2093map_skb: 2098map_skb:
2094 buffer_info->dma = pci_map_single(pdev, 2099 buffer_info->dma = dma_map_single(&pdev->dev,
2095 skb->data, 2100 skb->data,
2096 adapter->rx_buffer_len, 2101 adapter->rx_buffer_len,
2097 PCI_DMA_FROMDEVICE); 2102 DMA_FROM_DEVICE);
2098 2103
2099 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2104 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2100 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 2105 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2327,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
2322 2327
2323 if (netif_running(netdev)) { 2328 if (netif_running(netdev)) {
2324 if (ixgb_up(adapter)) { 2329 if (ixgb_up(adapter)) {
2325 printk ("ixgb: can't bring device back up after reset\n"); 2330 pr_err("can't bring device back up after reset\n");
2326 return; 2331 return;
2327 } 2332 }
2328 } 2333 }
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be4d965..e361185920ef 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
41 41
42#undef ASSERT 42#undef ASSERT
43#define ASSERT(x) BUG_ON(!(x)) 43#define ASSERT(x) BUG_ON(!(x))
44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44
45 45#define ENTER() pr_debug("%s\n", __func__);
46#ifdef DBG
47#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
48#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
49#else
50#define DEBUGOUT(S)
51#define DEBUGOUT1(S, A...)
52#endif
53
54#define DEBUGFUNC(F) DEBUGOUT(F)
55#define DEBUGOUT2 DEBUGOUT1
56#define DEBUGOUT3 DEBUGOUT2
57#define DEBUGOUT7 DEBUGOUT3
58 46
59#define IXGB_WRITE_REG(a, reg, value) ( \ 47#define IXGB_WRITE_REG(a, reg, value) ( \
60 writel((value), ((a)->hw_addr + IXGB_##reg))) 48 writel((value), ((a)->hw_addr + IXGB_##reg)))
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1ddadd6..88a08f056241 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb.h" 31#include "ixgb.h"
30 32
31/* This is the only thing that needs to be changed to adjust the 33/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
209 case enable_option: 211 case enable_option:
210 switch (*value) { 212 switch (*value) {
211 case OPTION_ENABLED: 213 case OPTION_ENABLED:
212 printk(KERN_INFO "%s Enabled\n", opt->name); 214 pr_info("%s Enabled\n", opt->name);
213 return 0; 215 return 0;
214 case OPTION_DISABLED: 216 case OPTION_DISABLED:
215 printk(KERN_INFO "%s Disabled\n", opt->name); 217 pr_info("%s Disabled\n", opt->name);
216 return 0; 218 return 0;
217 } 219 }
218 break; 220 break;
219 case range_option: 221 case range_option:
220 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 222 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
221 printk(KERN_INFO "%s set to %i\n", opt->name, *value); 223 pr_info("%s set to %i\n", opt->name, *value);
222 return 0; 224 return 0;
223 } 225 }
224 break; 226 break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
230 ent = &opt->arg.l.p[i]; 232 ent = &opt->arg.l.p[i];
231 if (*value == ent->i) { 233 if (*value == ent->i) {
232 if (ent->str[0] != '\0') 234 if (ent->str[0] != '\0')
233 printk(KERN_INFO "%s\n", ent->str); 235 pr_info("%s\n", ent->str);
234 return 0; 236 return 0;
235 } 237 }
236 } 238 }
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
240 BUG(); 242 BUG();
241 } 243 }
242 244
243 printk(KERN_INFO "Invalid %s specified (%i) %s\n", 245 pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
244 opt->name, *value, opt->err);
245 *value = opt->def; 246 *value = opt->def;
246 return -1; 247 return -1;
247} 248}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
261{ 262{
262 int bd = adapter->bd_number; 263 int bd = adapter->bd_number;
263 if (bd >= IXGB_MAX_NIC) { 264 if (bd >= IXGB_MAX_NIC) {
264 printk(KERN_NOTICE 265 pr_notice("Warning: no configuration for board #%i\n", bd);
265 "Warning: no configuration for board #%i\n", bd); 266 pr_notice("Using defaults for all values\n");
266 printk(KERN_NOTICE "Using defaults for all values\n");
267 } 267 }
268 268
269 { /* Transmit Descriptor Count */ 269 { /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
363 adapter->hw.fc.high_water = opt.def; 363 adapter->hw.fc.high_water = opt.def;
364 } 364 }
365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
366 printk(KERN_INFO 366 pr_info("Ignoring RxFCHighThresh when no RxFC\n");
367 "Ignoring RxFCHighThresh when no RxFC\n");
368 } 367 }
369 { /* Receive Flow Control Low Threshold */ 368 { /* Receive Flow Control Low Threshold */
370 const struct ixgb_option opt = { 369 const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
383 adapter->hw.fc.low_water = opt.def; 382 adapter->hw.fc.low_water = opt.def;
384 } 383 }
385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 384 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
386 printk(KERN_INFO 385 pr_info("Ignoring RxFCLowThresh when no RxFC\n");
387 "Ignoring RxFCLowThresh when no RxFC\n");
388 } 386 }
389 { /* Flow Control Pause Time Request*/ 387 { /* Flow Control Pause Time Request*/
390 const struct ixgb_option opt = { 388 const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
404 adapter->hw.fc.pause_time = opt.def; 402 adapter->hw.fc.pause_time = opt.def;
405 } 403 }
406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 404 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
407 printk(KERN_INFO 405 pr_info("Ignoring FCReqTimeout when no RxFC\n");
408 "Ignoring FCReqTimeout when no RxFC\n");
409 } 406 }
410 /* high low and spacing check for rx flow control thresholds */ 407 /* high low and spacing check for rx flow control thresholds */
411 if (adapter->hw.fc.type & ixgb_fc_tx_pause) { 408 if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
412 /* high must be greater than low */ 409 /* high must be greater than low */
413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { 410 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
414 /* set defaults */ 411 /* set defaults */
415 printk(KERN_INFO 412 pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
417 "Using Defaults\n");
418 adapter->hw.fc.high_water = DEFAULT_FCRTH; 413 adapter->hw.fc.high_water = DEFAULT_FCRTH;
419 adapter->hw.fc.low_water = DEFAULT_FCRTL; 414 adapter->hw.fc.low_water = DEFAULT_FCRTL;
420 } 415 }
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718c..d0ea3d6dea95 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
111 u16 default_vf_vlan_id; 111 u16 default_vf_vlan_id;
112 u16 vlans_enabled; 112 u16 vlans_enabled;
113 bool clear_to_send; 113 bool clear_to_send;
114 bool pf_set_mac;
114 int rar; 115 int rar;
116 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
117 u16 pf_qos;
115}; 118};
116 119
117/* wrapper around a pointer to a socket buffer, 120/* wrapper around a pointer to a socket buffer,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index f894bb633040..38c384031c4c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,8 @@
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
42void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 44void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 45s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed, 46 ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69 if (hw->phy.multispeed_fiber) { 71 if (hw->phy.multispeed_fiber) {
70 /* Set up dual speed SFP+ support */ 72 /* Set up dual speed SFP+ support */
71 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
74 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser =
77 &ixgbe_enable_tx_laser_multispeed_fiber;
72 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 78 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
73 } else { 79 } else {
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
74 mac->ops.flap_tx_laser = NULL; 82 mac->ops.flap_tx_laser = NULL;
75 if ((mac->ops.get_media_type(hw) == 83 if ((mac->ops.get_media_type(hw) ==
76 ixgbe_media_type_backplane) && 84 ixgbe_media_type_backplane) &&
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
415 return status; 423 return status;
416} 424}
417 425
426 /**
427 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
428 * @hw: pointer to hardware structure
429 *
430 * The base drivers may require better control over SFP+ module
431 * PHY states. This includes selectively shutting down the Tx
432 * laser on the PHY, effectively halting physical link.
433 **/
434void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
435{
436 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
437
438 /* Disable tx laser; allow 100us to go dark per spec */
439 esdp_reg |= IXGBE_ESDP_SDP3;
440 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
441 IXGBE_WRITE_FLUSH(hw);
442 udelay(100);
443}
444
445/**
446 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
447 * @hw: pointer to hardware structure
448 *
449 * The base drivers may require better control over SFP+ module
450 * PHY states. This includes selectively turning on the Tx
451 * laser on the PHY, effectively starting physical link.
452 **/
453void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
454{
455 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
456
457 /* Enable tx laser; allow 100ms to light up */
458 esdp_reg &= ~IXGBE_ESDP_SDP3;
459 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
460 IXGBE_WRITE_FLUSH(hw);
461 msleep(100);
462}
463
418/** 464/**
419 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 465 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
420 * @hw: pointer to hardware structure 466 * @hw: pointer to hardware structure
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
429 **/ 475 **/
430void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 476void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
431{ 477{
432 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
433
434 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); 478 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
435 479
436 if (hw->mac.autotry_restart) { 480 if (hw->mac.autotry_restart) {
437 /* Disable tx laser; allow 100us to go dark per spec */ 481 ixgbe_disable_tx_laser_multispeed_fiber(hw);
438 esdp_reg |= IXGBE_ESDP_SDP3; 482 ixgbe_enable_tx_laser_multispeed_fiber(hw);
439 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
440 IXGBE_WRITE_FLUSH(hw);
441 udelay(100);
442
443 /* Enable tx laser; allow 100ms to light up */
444 esdp_reg &= ~IXGBE_ESDP_SDP3;
445 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
446 IXGBE_WRITE_FLUSH(hw);
447 msleep(100);
448
449 hw->mac.autotry_restart = false; 483 hw->mac.autotry_restart = false;
450 } 484 }
451} 485}
@@ -608,6 +642,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
608 s32 i, j; 642 s32 i, j;
609 bool link_up = false; 643 bool link_up = false;
610 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 644 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
645 struct ixgbe_adapter *adapter = hw->back;
611 646
612 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n"); 647 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
613 648
@@ -692,6 +727,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
692 autoneg_wait_to_complete); 727 autoneg_wait_to_complete);
693 728
694out: 729out:
730 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
731 netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
732 " downgraded the link speed from the maximum"
733 " advertised\n");
695 return status; 734 return status;
696} 735}
697 736
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee77..dc7fd5b70bc3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
365 else 365 else
366 fc.disable_fc_autoneg = false; 366 fc.disable_fc_autoneg = false;
367 367
368 if (pause->rx_pause && pause->tx_pause) 368 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
369 fc.requested_mode = ixgbe_fc_full; 369 fc.requested_mode = ixgbe_fc_full;
370 else if (pause->rx_pause && !pause->tx_pause) 370 else if (pause->rx_pause && !pause->tx_pause)
371 fc.requested_mode = ixgbe_fc_rx_pause; 371 fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1458 struct ixgbe_tx_buffer *buf = 1458 struct ixgbe_tx_buffer *buf =
1459 &(tx_ring->tx_buffer_info[i]); 1459 &(tx_ring->tx_buffer_info[i]);
1460 if (buf->dma) 1460 if (buf->dma)
1461 pci_unmap_single(pdev, buf->dma, buf->length, 1461 dma_unmap_single(&pdev->dev, buf->dma,
1462 PCI_DMA_TODEVICE); 1462 buf->length, DMA_TO_DEVICE);
1463 if (buf->skb) 1463 if (buf->skb)
1464 dev_kfree_skb(buf->skb); 1464 dev_kfree_skb(buf->skb);
1465 } 1465 }
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1470 struct ixgbe_rx_buffer *buf = 1470 struct ixgbe_rx_buffer *buf =
1471 &(rx_ring->rx_buffer_info[i]); 1471 &(rx_ring->rx_buffer_info[i]);
1472 if (buf->dma) 1472 if (buf->dma)
1473 pci_unmap_single(pdev, buf->dma, 1473 dma_unmap_single(&pdev->dev, buf->dma,
1474 IXGBE_RXBUFFER_2048, 1474 IXGBE_RXBUFFER_2048,
1475 PCI_DMA_FROMDEVICE); 1475 DMA_FROM_DEVICE);
1476 if (buf->skb) 1476 if (buf->skb)
1477 dev_kfree_skb(buf->skb); 1477 dev_kfree_skb(buf->skb);
1478 } 1478 }
1479 } 1479 }
1480 1480
1481 if (tx_ring->desc) { 1481 if (tx_ring->desc) {
1482 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, 1482 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1483 tx_ring->dma); 1483 tx_ring->dma);
1484 tx_ring->desc = NULL; 1484 tx_ring->desc = NULL;
1485 } 1485 }
1486 if (rx_ring->desc) { 1486 if (rx_ring->desc) {
1487 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, 1487 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1488 rx_ring->dma); 1488 rx_ring->dma);
1489 rx_ring->desc = NULL; 1489 rx_ring->desc = NULL;
1490 } 1490 }
1491 1491
@@ -1520,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1520 1520
1521 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 1521 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1522 tx_ring->size = ALIGN(tx_ring->size, 4096); 1522 tx_ring->size = ALIGN(tx_ring->size, 4096);
1523 if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1523 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1524 &tx_ring->dma))) { 1524 &tx_ring->dma, GFP_KERNEL);
1525 if (!(tx_ring->desc)) {
1525 ret_val = 2; 1526 ret_val = 2;
1526 goto err_nomem; 1527 goto err_nomem;
1527 } 1528 }
@@ -1563,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1563 tx_ring->tx_buffer_info[i].skb = skb; 1564 tx_ring->tx_buffer_info[i].skb = skb;
1564 tx_ring->tx_buffer_info[i].length = skb->len; 1565 tx_ring->tx_buffer_info[i].length = skb->len;
1565 tx_ring->tx_buffer_info[i].dma = 1566 tx_ring->tx_buffer_info[i].dma =
1566 pci_map_single(pdev, skb->data, skb->len, 1567 dma_map_single(&pdev->dev, skb->data, skb->len,
1567 PCI_DMA_TODEVICE); 1568 DMA_TO_DEVICE);
1568 desc->read.buffer_addr = 1569 desc->read.buffer_addr =
1569 cpu_to_le64(tx_ring->tx_buffer_info[i].dma); 1570 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1570 desc->read.cmd_type_len = cpu_to_le32(skb->len); 1571 desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1593 1594
1594 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 1595 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1595 rx_ring->size = ALIGN(rx_ring->size, 4096); 1596 rx_ring->size = ALIGN(rx_ring->size, 4096);
1596 if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1597 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1597 &rx_ring->dma))) { 1598 &rx_ring->dma, GFP_KERNEL);
1599 if (!(rx_ring->desc)) {
1598 ret_val = 5; 1600 ret_val = 5;
1599 goto err_nomem; 1601 goto err_nomem;
1600 } 1602 }
@@ -1661,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1661 skb_reserve(skb, NET_IP_ALIGN); 1663 skb_reserve(skb, NET_IP_ALIGN);
1662 rx_ring->rx_buffer_info[i].skb = skb; 1664 rx_ring->rx_buffer_info[i].skb = skb;
1663 rx_ring->rx_buffer_info[i].dma = 1665 rx_ring->rx_buffer_info[i].dma =
1664 pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048, 1666 dma_map_single(&pdev->dev, skb->data,
1665 PCI_DMA_FROMDEVICE); 1667 IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
1666 rx_desc->read.pkt_addr = 1668 rx_desc->read.pkt_addr =
1667 cpu_to_le64(rx_ring->rx_buffer_info[i].dma); 1669 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1668 memset(skb->data, 0x00, skb->len); 1670 memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1775 ixgbe_create_lbtest_frame( 1777 ixgbe_create_lbtest_frame(
1776 tx_ring->tx_buffer_info[k].skb, 1778 tx_ring->tx_buffer_info[k].skb,
1777 1024); 1779 1024);
1778 pci_dma_sync_single_for_device(pdev, 1780 dma_sync_single_for_device(&pdev->dev,
1779 tx_ring->tx_buffer_info[k].dma, 1781 tx_ring->tx_buffer_info[k].dma,
1780 tx_ring->tx_buffer_info[k].length, 1782 tx_ring->tx_buffer_info[k].length,
1781 PCI_DMA_TODEVICE); 1783 DMA_TO_DEVICE);
1782 if (unlikely(++k == tx_ring->count)) 1784 if (unlikely(++k == tx_ring->count))
1783 k = 0; 1785 k = 0;
1784 } 1786 }
@@ -1789,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1789 good_cnt = 0; 1791 good_cnt = 0;
1790 do { 1792 do {
1791 /* receive the sent packets */ 1793 /* receive the sent packets */
1792 pci_dma_sync_single_for_cpu(pdev, 1794 dma_sync_single_for_cpu(&pdev->dev,
1793 rx_ring->rx_buffer_info[l].dma, 1795 rx_ring->rx_buffer_info[l].dma,
1794 IXGBE_RXBUFFER_2048, 1796 IXGBE_RXBUFFER_2048,
1795 PCI_DMA_FROMDEVICE); 1797 DMA_FROM_DEVICE);
1796 ret_val = ixgbe_check_lbtest_frame( 1798 ret_val = ixgbe_check_lbtest_frame(
1797 rx_ring->rx_buffer_info[l].skb, 1024); 1799 rx_ring->rx_buffer_info[l].skb, 1024);
1798 if (!ret_val) 1800 if (!ret_val)
@@ -2079,12 +2081,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2079 return 0; 2081 return 0;
2080} 2082}
2081 2083
2084/*
2085 * this function must be called before setting the new value of
2086 * rx_itr_setting
2087 */
2088static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
2089 struct ethtool_coalesce *ec)
2090{
2091 /* check the old value and enable RSC if necessary */
2092 if ((adapter->rx_itr_setting == 0) &&
2093 (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
2094 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2095 adapter->netdev->features |= NETIF_F_LRO;
2096 DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
2097 ec->rx_coalesce_usecs);
2098 return true;
2099 }
2100 return false;
2101}
2102
2082static int ixgbe_set_coalesce(struct net_device *netdev, 2103static int ixgbe_set_coalesce(struct net_device *netdev,
2083 struct ethtool_coalesce *ec) 2104 struct ethtool_coalesce *ec)
2084{ 2105{
2085 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2106 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2086 struct ixgbe_q_vector *q_vector; 2107 struct ixgbe_q_vector *q_vector;
2087 int i; 2108 int i;
2109 bool need_reset = false;
2088 2110
2089 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2111 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2090 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count 2112 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2117,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2095 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2117 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2096 2118
2097 if (ec->rx_coalesce_usecs > 1) { 2119 if (ec->rx_coalesce_usecs > 1) {
2120 u32 max_int;
2121 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2122 max_int = IXGBE_MAX_RSC_INT_RATE;
2123 else
2124 max_int = IXGBE_MAX_INT_RATE;
2125
2098 /* check the limits */ 2126 /* check the limits */
2099 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2127 if ((1000000/ec->rx_coalesce_usecs > max_int) ||
2100 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2128 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2101 return -EINVAL; 2129 return -EINVAL;
2102 2130
2131 /* check the old value and enable RSC if necessary */
2132 need_reset = ixgbe_reenable_rsc(adapter, ec);
2133
2103 /* store the value in ints/second */ 2134 /* store the value in ints/second */
2104 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2135 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2105 2136
@@ -2108,6 +2139,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2108 /* clear the lower bit as its used for dynamic state */ 2139 /* clear the lower bit as its used for dynamic state */
2109 adapter->rx_itr_setting &= ~1; 2140 adapter->rx_itr_setting &= ~1;
2110 } else if (ec->rx_coalesce_usecs == 1) { 2141 } else if (ec->rx_coalesce_usecs == 1) {
2142 /* check the old value and enable RSC if necessary */
2143 need_reset = ixgbe_reenable_rsc(adapter, ec);
2144
2111 /* 1 means dynamic mode */ 2145 /* 1 means dynamic mode */
2112 adapter->rx_eitr_param = 20000; 2146 adapter->rx_eitr_param = 20000;
2113 adapter->rx_itr_setting = 1; 2147 adapter->rx_itr_setting = 1;
@@ -2116,14 +2150,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2116 * any other value means disable eitr, which is best 2150 * any other value means disable eitr, which is best
2117 * served by setting the interrupt rate very high 2151 * served by setting the interrupt rate very high
2118 */ 2152 */
2119 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 2153 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2120 adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
2121 else
2122 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2123 adapter->rx_itr_setting = 0; 2154 adapter->rx_itr_setting = 0;
2155
2156 /*
2157 * if hardware RSC is enabled, disable it when
2158 * setting low latency mode, to avoid errata, assuming
2159 * that when the user set low latency mode they want
2160 * it at the cost of anything else
2161 */
2162 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2163 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2164 netdev->features &= ~NETIF_F_LRO;
2165 DPRINTK(PROBE, INFO,
2166 "rx-usecs set to 0, disabling RSC\n");
2167
2168 need_reset = true;
2169 }
2124 } 2170 }
2125 2171
2126 if (ec->tx_coalesce_usecs > 1) { 2172 if (ec->tx_coalesce_usecs > 1) {
2173 /*
2174 * don't have to worry about max_int as above because
2175 * tx vectors don't do hardware RSC (an rx function)
2176 */
2127 /* check the limits */ 2177 /* check the limits */
2128 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2178 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2129 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2179 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2217,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2167 ixgbe_write_eitr(q_vector); 2217 ixgbe_write_eitr(q_vector);
2168 } 2218 }
2169 2219
2220 /*
2221 * do reset here at the end to make sure EITR==0 case is handled
2222 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2223 * also locks in RSC enable/disable which requires reset
2224 */
2225 if (need_reset) {
2226 if (netif_running(netdev))
2227 ixgbe_reinit_locked(adapter);
2228 else
2229 ixgbe_reset(adapter);
2230 }
2231
2170 return 0; 2232 return 0;
2171} 2233}
2172 2234
@@ -2178,10 +2240,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2178 ethtool_op_set_flags(netdev, data); 2240 ethtool_op_set_flags(netdev, data);
2179 2241
2180 /* if state changes we need to update adapter->flags and reset */ 2242 /* if state changes we need to update adapter->flags and reset */
2181 if ((!!(data & ETH_FLAG_LRO)) != 2243 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
2182 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { 2244 /*
2183 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2245 * cast both to bool and verify if they are set the same
2184 need_reset = true; 2246 * but only enable RSC if itr is non-zero, as
2247 * itr=0 and RSC are mutually exclusive
2248 */
2249 if (((!!(data & ETH_FLAG_LRO)) !=
2250 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
2251 adapter->rx_itr_setting) {
2252 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2253 switch (adapter->hw.mac.type) {
2254 case ixgbe_mac_82599EB:
2255 need_reset = true;
2256 break;
2257 default:
2258 break;
2259 }
2260 } else if (!adapter->rx_itr_setting) {
2261 netdev->features &= ~ETH_FLAG_LRO;
2262 }
2185 } 2263 }
2186 2264
2187 /* 2265 /*
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a98ff0e76e86..d1a1868df817 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
176} 176}
177 177
178struct ixgbe_reg_info {
179 u32 ofs;
180 char *name;
181};
182
183static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
184
185 /* General Registers */
186 {IXGBE_CTRL, "CTRL"},
187 {IXGBE_STATUS, "STATUS"},
188 {IXGBE_CTRL_EXT, "CTRL_EXT"},
189
190 /* Interrupt Registers */
191 {IXGBE_EICR, "EICR"},
192
193 /* RX Registers */
194 {IXGBE_SRRCTL(0), "SRRCTL"},
195 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
196 {IXGBE_RDLEN(0), "RDLEN"},
197 {IXGBE_RDH(0), "RDH"},
198 {IXGBE_RDT(0), "RDT"},
199 {IXGBE_RXDCTL(0), "RXDCTL"},
200 {IXGBE_RDBAL(0), "RDBAL"},
201 {IXGBE_RDBAH(0), "RDBAH"},
202
203 /* TX Registers */
204 {IXGBE_TDBAL(0), "TDBAL"},
205 {IXGBE_TDBAH(0), "TDBAH"},
206 {IXGBE_TDLEN(0), "TDLEN"},
207 {IXGBE_TDH(0), "TDH"},
208 {IXGBE_TDT(0), "TDT"},
209 {IXGBE_TXDCTL(0), "TXDCTL"},
210
211 /* List Terminator */
212 {}
213};
214
215
216/*
217 * ixgbe_regdump - register printout routine
218 */
219static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
220{
221 int i = 0, j = 0;
222 char rname[16];
223 u32 regs[64];
224
225 switch (reginfo->ofs) {
226 case IXGBE_SRRCTL(0):
227 for (i = 0; i < 64; i++)
228 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
229 break;
230 case IXGBE_DCA_RXCTRL(0):
231 for (i = 0; i < 64; i++)
232 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
233 break;
234 case IXGBE_RDLEN(0):
235 for (i = 0; i < 64; i++)
236 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
237 break;
238 case IXGBE_RDH(0):
239 for (i = 0; i < 64; i++)
240 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
241 break;
242 case IXGBE_RDT(0):
243 for (i = 0; i < 64; i++)
244 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
245 break;
246 case IXGBE_RXDCTL(0):
247 for (i = 0; i < 64; i++)
248 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
249 break;
250 case IXGBE_RDBAL(0):
251 for (i = 0; i < 64; i++)
252 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
253 break;
254 case IXGBE_RDBAH(0):
255 for (i = 0; i < 64; i++)
256 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
257 break;
258 case IXGBE_TDBAL(0):
259 for (i = 0; i < 64; i++)
260 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
261 break;
262 case IXGBE_TDBAH(0):
263 for (i = 0; i < 64; i++)
264 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
265 break;
266 case IXGBE_TDLEN(0):
267 for (i = 0; i < 64; i++)
268 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
269 break;
270 case IXGBE_TDH(0):
271 for (i = 0; i < 64; i++)
272 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
273 break;
274 case IXGBE_TDT(0):
275 for (i = 0; i < 64; i++)
276 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
277 break;
278 case IXGBE_TXDCTL(0):
279 for (i = 0; i < 64; i++)
280 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
281 break;
282 default:
283 printk(KERN_INFO "%-15s %08x\n", reginfo->name,
284 IXGBE_READ_REG(hw, reginfo->ofs));
285 return;
286 }
287
288 for (i = 0; i < 8; i++) {
289 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
290 printk(KERN_ERR "%-15s ", rname);
291 for (j = 0; j < 8; j++)
292 printk(KERN_CONT "%08x ", regs[i*8+j]);
293 printk(KERN_CONT "\n");
294 }
295
296}
297
298/*
299 * ixgbe_dump - Print registers, tx-rings and rx-rings
300 */
301static void ixgbe_dump(struct ixgbe_adapter *adapter)
302{
303 struct net_device *netdev = adapter->netdev;
304 struct ixgbe_hw *hw = &adapter->hw;
305 struct ixgbe_reg_info *reginfo;
306 int n = 0;
307 struct ixgbe_ring *tx_ring;
308 struct ixgbe_tx_buffer *tx_buffer_info;
309 union ixgbe_adv_tx_desc *tx_desc;
310 struct my_u0 { u64 a; u64 b; } *u0;
311 struct ixgbe_ring *rx_ring;
312 union ixgbe_adv_rx_desc *rx_desc;
313 struct ixgbe_rx_buffer *rx_buffer_info;
314 u32 staterr;
315 int i = 0;
316
317 if (!netif_msg_hw(adapter))
318 return;
319
320 /* Print netdevice Info */
321 if (netdev) {
322 dev_info(&adapter->pdev->dev, "Net device Info\n");
323 printk(KERN_INFO "Device Name state "
324 "trans_start last_rx\n");
325 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
326 netdev->name,
327 netdev->state,
328 netdev->trans_start,
329 netdev->last_rx);
330 }
331
332 /* Print Registers */
333 dev_info(&adapter->pdev->dev, "Register Dump\n");
334 printk(KERN_INFO " Register Name Value\n");
335 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
336 reginfo->name; reginfo++) {
337 ixgbe_regdump(hw, reginfo);
338 }
339
340 /* Print TX Ring Summary */
341 if (!netdev || !netif_running(netdev))
342 goto exit;
343
344 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
345 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
346 "leng ntw timestamp\n");
347 for (n = 0; n < adapter->num_tx_queues; n++) {
348 tx_ring = adapter->tx_ring[n];
349 tx_buffer_info =
350 &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
351 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
352 n, tx_ring->next_to_use, tx_ring->next_to_clean,
353 (u64)tx_buffer_info->dma,
354 tx_buffer_info->length,
355 tx_buffer_info->next_to_watch,
356 (u64)tx_buffer_info->time_stamp);
357 }
358
359 /* Print TX Rings */
360 if (!netif_msg_tx_done(adapter))
361 goto rx_ring_summary;
362
363 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
364
365 /* Transmit Descriptor Formats
366 *
367 * Advanced Transmit Descriptor
368 * +--------------------------------------------------------------+
369 * 0 | Buffer Address [63:0] |
370 * +--------------------------------------------------------------+
371 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
372 * +--------------------------------------------------------------+
373 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
374 */
375
376 for (n = 0; n < adapter->num_tx_queues; n++) {
377 tx_ring = adapter->tx_ring[n];
378 printk(KERN_INFO "------------------------------------\n");
379 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
380 printk(KERN_INFO "------------------------------------\n");
381 printk(KERN_INFO "T [desc] [address 63:0 ] "
382 "[PlPOIdStDDt Ln] [bi->dma ] "
383 "leng ntw timestamp bi->skb\n");
384
385 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
386 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
387 tx_buffer_info = &tx_ring->tx_buffer_info[i];
388 u0 = (struct my_u0 *)tx_desc;
389 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
390 " %04X %3X %016llX %p", i,
391 le64_to_cpu(u0->a),
392 le64_to_cpu(u0->b),
393 (u64)tx_buffer_info->dma,
394 tx_buffer_info->length,
395 tx_buffer_info->next_to_watch,
396 (u64)tx_buffer_info->time_stamp,
397 tx_buffer_info->skb);
398 if (i == tx_ring->next_to_use &&
399 i == tx_ring->next_to_clean)
400 printk(KERN_CONT " NTC/U\n");
401 else if (i == tx_ring->next_to_use)
402 printk(KERN_CONT " NTU\n");
403 else if (i == tx_ring->next_to_clean)
404 printk(KERN_CONT " NTC\n");
405 else
406 printk(KERN_CONT "\n");
407
408 if (netif_msg_pktdata(adapter) &&
409 tx_buffer_info->dma != 0)
410 print_hex_dump(KERN_INFO, "",
411 DUMP_PREFIX_ADDRESS, 16, 1,
412 phys_to_virt(tx_buffer_info->dma),
413 tx_buffer_info->length, true);
414 }
415 }
416
417 /* Print RX Rings Summary */
418rx_ring_summary:
419 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
420 printk(KERN_INFO "Queue [NTU] [NTC]\n");
421 for (n = 0; n < adapter->num_rx_queues; n++) {
422 rx_ring = adapter->rx_ring[n];
423 printk(KERN_INFO "%5d %5X %5X\n", n,
424 rx_ring->next_to_use, rx_ring->next_to_clean);
425 }
426
427 /* Print RX Rings */
428 if (!netif_msg_rx_status(adapter))
429 goto exit;
430
431 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
432
433 /* Advanced Receive Descriptor (Read) Format
434 * 63 1 0
435 * +-----------------------------------------------------+
436 * 0 | Packet Buffer Address [63:1] |A0/NSE|
437 * +----------------------------------------------+------+
438 * 8 | Header Buffer Address [63:1] | DD |
439 * +-----------------------------------------------------+
440 *
441 *
442 * Advanced Receive Descriptor (Write-Back) Format
443 *
444 * 63 48 47 32 31 30 21 20 16 15 4 3 0
445 * +------------------------------------------------------+
446 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
447 * | Checksum Ident | | | | Type | Type |
448 * +------------------------------------------------------+
449 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
450 * +------------------------------------------------------+
451 * 63 48 47 32 31 20 19 0
452 */
453 for (n = 0; n < adapter->num_rx_queues; n++) {
454 rx_ring = adapter->rx_ring[n];
455 printk(KERN_INFO "------------------------------------\n");
456 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
457 printk(KERN_INFO "------------------------------------\n");
458 printk(KERN_INFO "R [desc] [ PktBuf A0] "
459 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
460 "<-- Adv Rx Read format\n");
461 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
462 "[vl er S cks ln] ---------------- [bi->skb] "
463 "<-- Adv Rx Write-Back format\n");
464
465 for (i = 0; i < rx_ring->count; i++) {
466 rx_buffer_info = &rx_ring->rx_buffer_info[i];
467 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
468 u0 = (struct my_u0 *)rx_desc;
469 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
470 if (staterr & IXGBE_RXD_STAT_DD) {
471 /* Descriptor Done */
472 printk(KERN_INFO "RWB[0x%03X] %016llX "
473 "%016llX ---------------- %p", i,
474 le64_to_cpu(u0->a),
475 le64_to_cpu(u0->b),
476 rx_buffer_info->skb);
477 } else {
478 printk(KERN_INFO "R [0x%03X] %016llX "
479 "%016llX %016llX %p", i,
480 le64_to_cpu(u0->a),
481 le64_to_cpu(u0->b),
482 (u64)rx_buffer_info->dma,
483 rx_buffer_info->skb);
484
485 if (netif_msg_pktdata(adapter)) {
486 print_hex_dump(KERN_INFO, "",
487 DUMP_PREFIX_ADDRESS, 16, 1,
488 phys_to_virt(rx_buffer_info->dma),
489 rx_ring->rx_buf_len, true);
490
491 if (rx_ring->rx_buf_len
492 < IXGBE_RXBUFFER_2048)
493 print_hex_dump(KERN_INFO, "",
494 DUMP_PREFIX_ADDRESS, 16, 1,
495 phys_to_virt(
496 rx_buffer_info->page_dma +
497 rx_buffer_info->page_offset
498 ),
499 PAGE_SIZE/2, true);
500 }
501 }
502
503 if (i == rx_ring->next_to_use)
504 printk(KERN_CONT " NTU\n");
505 else if (i == rx_ring->next_to_clean)
506 printk(KERN_CONT " NTC\n");
507 else
508 printk(KERN_CONT "\n");
509
510 }
511 }
512
513exit:
514 return;
515}
516
178static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 517static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
179{ 518{
180 u32 ctrl_ext; 519 u32 ctrl_ext;
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
266{ 605{
267 if (tx_buffer_info->dma) { 606 if (tx_buffer_info->dma) {
268 if (tx_buffer_info->mapped_as_page) 607 if (tx_buffer_info->mapped_as_page)
269 pci_unmap_page(adapter->pdev, 608 dma_unmap_page(&adapter->pdev->dev,
270 tx_buffer_info->dma, 609 tx_buffer_info->dma,
271 tx_buffer_info->length, 610 tx_buffer_info->length,
272 PCI_DMA_TODEVICE); 611 DMA_TO_DEVICE);
273 else 612 else
274 pci_unmap_single(adapter->pdev, 613 dma_unmap_single(&adapter->pdev->dev,
275 tx_buffer_info->dma, 614 tx_buffer_info->dma,
276 tx_buffer_info->length, 615 tx_buffer_info->length,
277 PCI_DMA_TODEVICE); 616 DMA_TO_DEVICE);
278 tx_buffer_info->dma = 0; 617 tx_buffer_info->dma = 0;
279 } 618 }
280 if (tx_buffer_info->skb) { 619 if (tx_buffer_info->skb) {
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
721 bi->page_offset ^= (PAGE_SIZE / 2); 1060 bi->page_offset ^= (PAGE_SIZE / 2);
722 } 1061 }
723 1062
724 bi->page_dma = pci_map_page(pdev, bi->page, 1063 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
725 bi->page_offset, 1064 bi->page_offset,
726 (PAGE_SIZE / 2), 1065 (PAGE_SIZE / 2),
727 PCI_DMA_FROMDEVICE); 1066 DMA_FROM_DEVICE);
728 } 1067 }
729 1068
730 if (!bi->skb) { 1069 if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
743 - skb->data)); 1082 - skb->data));
744 1083
745 bi->skb = skb; 1084 bi->skb = skb;
746 bi->dma = pci_map_single(pdev, skb->data, 1085 bi->dma = dma_map_single(&pdev->dev, skb->data,
747 rx_ring->rx_buf_len, 1086 rx_ring->rx_buf_len,
748 PCI_DMA_FROMDEVICE); 1087 DMA_FROM_DEVICE);
749 } 1088 }
750 /* Refresh the desc even if buffer_addrs didn't change because 1089 /* Refresh the desc even if buffer_addrs didn't change because
751 * each write-back erases this info. */ 1090 * each write-back erases this info. */
@@ -886,16 +1225,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
886 */ 1225 */
887 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1226 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
888 else 1227 else
889 pci_unmap_single(pdev, rx_buffer_info->dma, 1228 dma_unmap_single(&pdev->dev,
1229 rx_buffer_info->dma,
890 rx_ring->rx_buf_len, 1230 rx_ring->rx_buf_len,
891 PCI_DMA_FROMDEVICE); 1231 DMA_FROM_DEVICE);
892 rx_buffer_info->dma = 0; 1232 rx_buffer_info->dma = 0;
893 skb_put(skb, len); 1233 skb_put(skb, len);
894 } 1234 }
895 1235
896 if (upper_len) { 1236 if (upper_len) {
897 pci_unmap_page(pdev, rx_buffer_info->page_dma, 1237 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
898 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 1238 PAGE_SIZE / 2, DMA_FROM_DEVICE);
899 rx_buffer_info->page_dma = 0; 1239 rx_buffer_info->page_dma = 0;
900 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1240 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
901 rx_buffer_info->page, 1241 rx_buffer_info->page,
@@ -937,9 +1277,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
937 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 1277 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
938 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 1278 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
939 if (IXGBE_RSC_CB(skb)->dma) { 1279 if (IXGBE_RSC_CB(skb)->dma) {
940 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, 1280 dma_unmap_single(&pdev->dev,
1281 IXGBE_RSC_CB(skb)->dma,
941 rx_ring->rx_buf_len, 1282 rx_ring->rx_buf_len,
942 PCI_DMA_FROMDEVICE); 1283 DMA_FROM_DEVICE);
943 IXGBE_RSC_CB(skb)->dma = 0; 1284 IXGBE_RSC_CB(skb)->dma = 0;
944 } 1285 }
945 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 1286 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
@@ -1190,6 +1531,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1190 itr_reg |= (itr_reg << 16); 1531 itr_reg |= (itr_reg << 16);
1191 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1532 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1192 /* 1533 /*
1534 * 82599 can support a value of zero, so allow it for
1535 * max interrupt rate, but there is an errata where it can
1536 * not be zero with RSC
1537 */
1538 if (itr_reg == 8 &&
1539 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1540 itr_reg = 0;
1541
1542 /*
1193 * set the WDIS bit to not clear the timer bits and cause an 1543 * set the WDIS bit to not clear the timer bits and cause an
1194 * immediate assertion of the interrupt 1544 * immediate assertion of the interrupt
1195 */ 1545 */
@@ -2372,7 +2722,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2372 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); 2722 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2373 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); 2723 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2374 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 2724 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2375 ixgbe_set_vmolr(hw, adapter->num_vfs); 2725 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2376 } 2726 }
2377 2727
2378 /* Program MRQC for the distribution of queues */ 2728 /* Program MRQC for the distribution of queues */
@@ -2936,8 +3286,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2936 for (i = 0; i < adapter->num_tx_queues; i++) { 3286 for (i = 0; i < adapter->num_tx_queues; i++) {
2937 j = adapter->tx_ring[i]->reg_idx; 3287 j = adapter->tx_ring[i]->reg_idx;
2938 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3288 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2939 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 3289 if (adapter->rx_itr_setting == 0) {
2940 txdctl |= (8 << 16); 3290 /* cannot set wthresh when itr==0 */
3291 txdctl &= ~0x007F0000;
3292 } else {
3293 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3294 txdctl |= (8 << 16);
3295 }
2941 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3296 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2942 } 3297 }
2943 3298
@@ -2991,6 +3346,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2991 else 3346 else
2992 ixgbe_configure_msi_and_legacy(adapter); 3347 ixgbe_configure_msi_and_legacy(adapter);
2993 3348
3349 /* enable the optics */
3350 if (hw->phy.multispeed_fiber)
3351 hw->mac.ops.enable_tx_laser(hw);
3352
2994 clear_bit(__IXGBE_DOWN, &adapter->state); 3353 clear_bit(__IXGBE_DOWN, &adapter->state);
2995 ixgbe_napi_enable_all(adapter); 3354 ixgbe_napi_enable_all(adapter);
2996 3355
@@ -3136,9 +3495,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3136 3495
3137 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3496 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3138 if (rx_buffer_info->dma) { 3497 if (rx_buffer_info->dma) {
3139 pci_unmap_single(pdev, rx_buffer_info->dma, 3498 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3140 rx_ring->rx_buf_len, 3499 rx_ring->rx_buf_len,
3141 PCI_DMA_FROMDEVICE); 3500 DMA_FROM_DEVICE);
3142 rx_buffer_info->dma = 0; 3501 rx_buffer_info->dma = 0;
3143 } 3502 }
3144 if (rx_buffer_info->skb) { 3503 if (rx_buffer_info->skb) {
@@ -3147,9 +3506,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3147 do { 3506 do {
3148 struct sk_buff *this = skb; 3507 struct sk_buff *this = skb;
3149 if (IXGBE_RSC_CB(this)->dma) { 3508 if (IXGBE_RSC_CB(this)->dma) {
3150 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, 3509 dma_unmap_single(&pdev->dev,
3510 IXGBE_RSC_CB(this)->dma,
3151 rx_ring->rx_buf_len, 3511 rx_ring->rx_buf_len,
3152 PCI_DMA_FROMDEVICE); 3512 DMA_FROM_DEVICE);
3153 IXGBE_RSC_CB(this)->dma = 0; 3513 IXGBE_RSC_CB(this)->dma = 0;
3154 } 3514 }
3155 skb = skb->prev; 3515 skb = skb->prev;
@@ -3159,8 +3519,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3159 if (!rx_buffer_info->page) 3519 if (!rx_buffer_info->page)
3160 continue; 3520 continue;
3161 if (rx_buffer_info->page_dma) { 3521 if (rx_buffer_info->page_dma) {
3162 pci_unmap_page(pdev, rx_buffer_info->page_dma, 3522 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3163 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 3523 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3164 rx_buffer_info->page_dma = 0; 3524 rx_buffer_info->page_dma = 0;
3165 } 3525 }
3166 put_page(rx_buffer_info->page); 3526 put_page(rx_buffer_info->page);
@@ -3252,6 +3612,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3252 /* signal that we are down to the interrupt handler */ 3612 /* signal that we are down to the interrupt handler */
3253 set_bit(__IXGBE_DOWN, &adapter->state); 3613 set_bit(__IXGBE_DOWN, &adapter->state);
3254 3614
3615 /* power down the optics */
3616 if (hw->phy.multispeed_fiber)
3617 hw->mac.ops.disable_tx_laser(hw);
3618
3255 /* disable receive for all VFs and wait one second */ 3619 /* disable receive for all VFs and wait one second */
3256 if (adapter->num_vfs) { 3620 if (adapter->num_vfs) {
3257 /* ping all the active vfs to let them know we are going down */ 3621 /* ping all the active vfs to let them know we are going down */
@@ -3269,22 +3633,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3269 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3633 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3270 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3634 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3271 3635
3272 netif_tx_disable(netdev);
3273
3274 IXGBE_WRITE_FLUSH(hw); 3636 IXGBE_WRITE_FLUSH(hw);
3275 msleep(10); 3637 msleep(10);
3276 3638
3277 netif_tx_stop_all_queues(netdev); 3639 netif_tx_stop_all_queues(netdev);
3278 3640
3279 ixgbe_irq_disable(adapter);
3280
3281 ixgbe_napi_disable_all(adapter);
3282
3283 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3641 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3284 del_timer_sync(&adapter->sfp_timer); 3642 del_timer_sync(&adapter->sfp_timer);
3285 del_timer_sync(&adapter->watchdog_timer); 3643 del_timer_sync(&adapter->watchdog_timer);
3286 cancel_work_sync(&adapter->watchdog_task); 3644 cancel_work_sync(&adapter->watchdog_task);
3287 3645
3646 netif_carrier_off(netdev);
3647 netif_tx_disable(netdev);
3648
3649 ixgbe_irq_disable(adapter);
3650
3651 ixgbe_napi_disable_all(adapter);
3652
3288 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3653 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3289 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3654 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3290 cancel_work_sync(&adapter->fdir_reinit_task); 3655 cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3302,8 +3667,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3302 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3667 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3303 ~IXGBE_DMATXCTL_TE)); 3668 ~IXGBE_DMATXCTL_TE));
3304 3669
3305 netif_carrier_off(netdev);
3306
3307 /* clear n-tuple filters that are cached */ 3670 /* clear n-tuple filters that are cached */
3308 ethtool_ntuple_flush(netdev); 3671 ethtool_ntuple_flush(netdev);
3309 3672
@@ -3380,6 +3743,8 @@ static void ixgbe_reset_task(struct work_struct *work)
3380 3743
3381 adapter->tx_timeout_count++; 3744 adapter->tx_timeout_count++;
3382 3745
3746 ixgbe_dump(adapter);
3747 netdev_err(adapter->netdev, "Reset adapter\n");
3383 ixgbe_reinit_locked(adapter); 3748 ixgbe_reinit_locked(adapter);
3384} 3749}
3385 3750
@@ -4382,8 +4747,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4382 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4747 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4383 tx_ring->size = ALIGN(tx_ring->size, 4096); 4748 tx_ring->size = ALIGN(tx_ring->size, 4096);
4384 4749
4385 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 4750 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4386 &tx_ring->dma); 4751 &tx_ring->dma, GFP_KERNEL);
4387 if (!tx_ring->desc) 4752 if (!tx_ring->desc)
4388 goto err; 4753 goto err;
4389 4754
@@ -4453,7 +4818,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4453 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 4818 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4454 rx_ring->size = ALIGN(rx_ring->size, 4096); 4819 rx_ring->size = ALIGN(rx_ring->size, 4096);
4455 4820
4456 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); 4821 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
4822 &rx_ring->dma, GFP_KERNEL);
4457 4823
4458 if (!rx_ring->desc) { 4824 if (!rx_ring->desc) {
4459 DPRINTK(PROBE, ERR, 4825 DPRINTK(PROBE, ERR,
@@ -4514,7 +4880,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4514 vfree(tx_ring->tx_buffer_info); 4880 vfree(tx_ring->tx_buffer_info);
4515 tx_ring->tx_buffer_info = NULL; 4881 tx_ring->tx_buffer_info = NULL;
4516 4882
4517 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 4883 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
4884 tx_ring->dma);
4518 4885
4519 tx_ring->desc = NULL; 4886 tx_ring->desc = NULL;
4520} 4887}
@@ -4551,7 +4918,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4551 vfree(rx_ring->rx_buffer_info); 4918 vfree(rx_ring->rx_buffer_info);
4552 rx_ring->rx_buffer_info = NULL; 4919 rx_ring->rx_buffer_info = NULL;
4553 4920
4554 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 4921 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
4922 rx_ring->dma);
4555 4923
4556 rx_ring->desc = NULL; 4924 rx_ring->desc = NULL;
4557} 4925}
@@ -5421,10 +5789,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5421 5789
5422 tx_buffer_info->length = size; 5790 tx_buffer_info->length = size;
5423 tx_buffer_info->mapped_as_page = false; 5791 tx_buffer_info->mapped_as_page = false;
5424 tx_buffer_info->dma = pci_map_single(pdev, 5792 tx_buffer_info->dma = dma_map_single(&pdev->dev,
5425 skb->data + offset, 5793 skb->data + offset,
5426 size, PCI_DMA_TODEVICE); 5794 size, DMA_TO_DEVICE);
5427 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5795 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5428 goto dma_error; 5796 goto dma_error;
5429 tx_buffer_info->time_stamp = jiffies; 5797 tx_buffer_info->time_stamp = jiffies;
5430 tx_buffer_info->next_to_watch = i; 5798 tx_buffer_info->next_to_watch = i;
@@ -5457,12 +5825,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5457 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5825 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5458 5826
5459 tx_buffer_info->length = size; 5827 tx_buffer_info->length = size;
5460 tx_buffer_info->dma = pci_map_page(adapter->pdev, 5828 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
5461 frag->page, 5829 frag->page,
5462 offset, size, 5830 offset, size,
5463 PCI_DMA_TODEVICE); 5831 DMA_TO_DEVICE);
5464 tx_buffer_info->mapped_as_page = true; 5832 tx_buffer_info->mapped_as_page = true;
5465 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5833 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5466 goto dma_error; 5834 goto dma_error;
5467 tx_buffer_info->time_stamp = jiffies; 5835 tx_buffer_info->time_stamp = jiffies;
5468 tx_buffer_info->next_to_watch = i; 5836 tx_buffer_info->next_to_watch = i;
@@ -5943,6 +6311,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5943 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 6311 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5944 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 6312 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
5945 .ndo_do_ioctl = ixgbe_ioctl, 6313 .ndo_do_ioctl = ixgbe_ioctl,
6314 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6315 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6316 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6317 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
5946#ifdef CONFIG_NET_POLL_CONTROLLER 6318#ifdef CONFIG_NET_POLL_CONTROLLER
5947 .ndo_poll_controller = ixgbe_netpoll, 6319 .ndo_poll_controller = ixgbe_netpoll,
5948#endif 6320#endif
@@ -6040,13 +6412,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6040 if (err) 6412 if (err)
6041 return err; 6413 return err;
6042 6414
6043 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 6415 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6044 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 6416 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6045 pci_using_dac = 1; 6417 pci_using_dac = 1;
6046 } else { 6418 } else {
6047 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6419 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6048 if (err) { 6420 if (err) {
6049 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 6421 err = dma_set_coherent_mask(&pdev->dev,
6422 DMA_BIT_MASK(32));
6050 if (err) { 6423 if (err) {
6051 dev_err(&pdev->dev, "No usable DMA " 6424 dev_err(&pdev->dev, "No usable DMA "
6052 "configuration, aborting\n"); 6425 "configuration, aborting\n");
@@ -6262,6 +6635,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6262 goto err_eeprom; 6635 goto err_eeprom;
6263 } 6636 }
6264 6637
6638 /* power down the optics */
6639 if (hw->phy.multispeed_fiber)
6640 hw->mac.ops.disable_tx_laser(hw);
6641
6265 init_timer(&adapter->watchdog_timer); 6642 init_timer(&adapter->watchdog_timer);
6266 adapter->watchdog_timer.function = &ixgbe_watchdog; 6643 adapter->watchdog_timer.function = &ixgbe_watchdog;
6267 adapter->watchdog_timer.data = (unsigned long)adapter; 6644 adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6409,16 +6786,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6409 del_timer_sync(&adapter->sfp_timer); 6786 del_timer_sync(&adapter->sfp_timer);
6410 cancel_work_sync(&adapter->watchdog_task); 6787 cancel_work_sync(&adapter->watchdog_task);
6411 cancel_work_sync(&adapter->sfp_task); 6788 cancel_work_sync(&adapter->sfp_task);
6412 if (adapter->hw.phy.multispeed_fiber) {
6413 struct ixgbe_hw *hw = &adapter->hw;
6414 /*
6415 * Restart clause 37 autoneg, disable and re-enable
6416 * the tx laser, to clear & alert the link partner
6417 * that it needs to restart autotry
6418 */
6419 hw->mac.autotry_restart = true;
6420 hw->mac.ops.flap_tx_laser(hw);
6421 }
6422 cancel_work_sync(&adapter->multispeed_fiber_task); 6789 cancel_work_sync(&adapter->multispeed_fiber_task);
6423 cancel_work_sync(&adapter->sfp_config_module_task); 6790 cancel_work_sync(&adapter->sfp_config_module_task);
6424 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 6791 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf) 48 int entries, u16 *hash_list, u32 vf)
49{ 49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 struct ixgbe_hw *hw = &adapter->hw;
51 int i; 52 int i;
53 u32 vector_bit;
54 u32 vector_reg;
55 u32 mta_reg;
52 56
53 /* only so many hash values supported */ 57 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 58 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 vfinfo->vf_mc_hashes[i] = hash_list[i];; 72 vfinfo->vf_mc_hashes[i] = hash_list[i];;
69 } 73 }
70 74
71 /* Flush and reset the mta with the new values */ 75 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
72 ixgbe_set_rx_mode(adapter->netdev); 76 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
77 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
78 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
79 mta_reg |= (1 << vector_bit);
80 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
81 }
73 82
74 return 0; 83 return 0;
75} 84}
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
98 107
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 108int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{ 109{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113} 111}
114 112
115 113
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) 114void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
117{ 115{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE | 117 vmolr |= (IXGBE_VMOLR_ROMPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE | 118 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM); 119 IXGBE_VMOLR_BAM);
120 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE;
122 else
123 vmolr &= ~IXGBE_VMOLR_AUPE;
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); 124 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124} 125}
125 126
127static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
128{
129 struct ixgbe_hw *hw = &adapter->hw;
130
131 if (vid)
132 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
133 (vid | IXGBE_VMVIR_VLANA_DEFAULT));
134 else
135 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
136}
137
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{ 139{
128 struct ixgbe_hw *hw = &adapter->hw; 140 struct ixgbe_hw *hw = &adapter->hw;
129 141
130 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf); 143 if (adapter->vfinfo[vf].pf_vlan) {
132 144 ixgbe_set_vf_vlan(adapter, true,
145 adapter->vfinfo[vf].pf_vlan, vf);
146 ixgbe_set_vmvir(adapter,
147 (adapter->vfinfo[vf].pf_vlan |
148 (adapter->vfinfo[vf].pf_qos <<
149 VLAN_PRIO_SHIFT)), vf);
150 ixgbe_set_vmolr(hw, vf, false);
151 } else {
152 ixgbe_set_vmvir(adapter, 0, vf);
153 ixgbe_set_vmolr(hw, vf, true);
154 }
133 155
134 /* reset multicast table array for vf */ 156 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0; 157 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
263 case IXGBE_VF_SET_MAC_ADDR: 285 case IXGBE_VF_SET_MAC_ADDR:
264 { 286 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1])); 287 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac)) 288 if (is_valid_ether_addr(new_mac) &&
289 !adapter->vfinfo[vf].pf_set_mac)
267 ixgbe_set_vf_mac(adapter, vf, new_mac); 290 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else 291 else
269 retval = -1; 292 ixgbe_set_vf_mac(adapter,
293 vf, adapter->vfinfo[vf].vf_mac_addresses);
270 } 294 }
271 break; 295 break;
272 case IXGBE_VF_SET_MULTICAST: 296 case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
360 } 384 }
361} 385}
362 386
387int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
388{
389 struct ixgbe_adapter *adapter = netdev_priv(netdev);
390 if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
391 return -EINVAL;
392 adapter->vfinfo[vf].pf_set_mac = true;
393 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
394 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
395 " change effective.");
396 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
397 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
398 " but the PF device is not up.\n");
399 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
400 " attempting to use the VF device.\n");
401 }
402 return ixgbe_set_vf_mac(adapter, vf, mac);
403}
404
405int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
406{
407 int err = 0;
408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
409
410 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
411 return -EINVAL;
412 if (vlan || qos) {
413 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
414 if (err)
415 goto out;
416 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
417 ixgbe_set_vmolr(&adapter->hw, vf, false);
418 adapter->vfinfo[vf].pf_vlan = vlan;
419 adapter->vfinfo[vf].pf_qos = qos;
420 dev_info(&adapter->pdev->dev,
421 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
422 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
423 dev_warn(&adapter->pdev->dev,
424 "The VF VLAN has been set,"
425 " but the PF device is not up.\n");
426 dev_warn(&adapter->pdev->dev,
427 "Bring the PF device up before"
428 " attempting to use the VF device.\n");
429 }
430 } else {
431 err = ixgbe_set_vf_vlan(adapter, false,
432 adapter->vfinfo[vf].pf_vlan, vf);
433 ixgbe_set_vmvir(adapter, vlan, vf);
434 ixgbe_set_vmolr(&adapter->hw, vf, true);
435 adapter->vfinfo[vf].pf_vlan = 0;
436 adapter->vfinfo[vf].pf_qos = 0;
437 }
438out:
439 return err;
440}
441
442int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
443{
444 return -EOPNOTSUPP;
445}
446
447int ixgbe_ndo_get_vf_config(struct net_device *netdev,
448 int vf, struct ifla_vf_info *ivi)
449{
450 struct ixgbe_adapter *adapter = netdev_priv(netdev);
451 if (vf >= adapter->num_vfs)
452 return -EINVAL;
453 ivi->vf = vf;
454 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
455 ivi->tx_rate = 0;
456 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
457 ivi->qos = adapter->vfinfo[vf].pf_qos;
458 return 0;
459}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf); 32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); 34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); 35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); 36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); 37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter); 44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
46int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
47 u8 qos);
48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
49int ixgbe_ndo_get_vf_config(struct net_device *netdev,
50 int vf, struct ifla_vf_info *ivi);
45 51
46#endif /* _IXGBE_SRIOV_H_ */ 52#endif /* _IXGBE_SRIOV_H_ */
47 53
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index aed4ed665648..4277cbbb8126 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -219,6 +219,7 @@
219#define IXGBE_MTQC 0x08120 219#define IXGBE_MTQC 0x08120
220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
222#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
222#define IXGBE_VT_CTL 0x051B0 223#define IXGBE_VT_CTL 0x051B0
223#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 224#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
224#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 225#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -1311,6 +1312,10 @@
1311#define IXGBE_VLVF_ENTRIES 64 1312#define IXGBE_VLVF_ENTRIES 64
1312#define IXGBE_VLVF_VLANID_MASK 0x00000FFF 1313#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1313 1314
1315/* Per VF Port VLAN insertion rules */
1316#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
1317#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
1318
1314#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1319#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1315 1320
1316/* STATUS Bit Masks */ 1321/* STATUS Bit Masks */
@@ -2398,6 +2403,8 @@ struct ixgbe_mac_operations {
2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2403 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2399 2404
2400 /* Link */ 2405 /* Link */
2406 void (*disable_tx_laser)(struct ixgbe_hw *);
2407 void (*enable_tx_laser)(struct ixgbe_hw *);
2401 void (*flap_tx_laser)(struct ixgbe_hw *); 2408 void (*flap_tx_laser)(struct ixgbe_hw *);
2402 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); 2409 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2403 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2410 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447a..ca2c81f49a05 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43 43
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ 44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ 46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
47#define IXGBE_LINKS_UP 0x40000000 47#define IXGBE_LINKS_UP 0x40000000
48#define IXGBE_LINKS_SPEED 0x20000000 48#define IXGBE_LINKS_SPEED_82599 0x30000000
49#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
50#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
49 51
50/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 52/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
51#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 53#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index f484161418b6..460c37fee965 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
139{ 139{
140 if (tx_buffer_info->dma) { 140 if (tx_buffer_info->dma) {
141 if (tx_buffer_info->mapped_as_page) 141 if (tx_buffer_info->mapped_as_page)
142 pci_unmap_page(adapter->pdev, 142 dma_unmap_page(&adapter->pdev->dev,
143 tx_buffer_info->dma, 143 tx_buffer_info->dma,
144 tx_buffer_info->length, 144 tx_buffer_info->length,
145 PCI_DMA_TODEVICE); 145 DMA_TO_DEVICE);
146 else 146 else
147 pci_unmap_single(adapter->pdev, 147 dma_unmap_single(&adapter->pdev->dev,
148 tx_buffer_info->dma, 148 tx_buffer_info->dma,
149 tx_buffer_info->length, 149 tx_buffer_info->length,
150 PCI_DMA_TODEVICE); 150 DMA_TO_DEVICE);
151 tx_buffer_info->dma = 0; 151 tx_buffer_info->dma = 0;
152 } 152 }
153 if (tx_buffer_info->skb) { 153 if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
416 bi->page_offset ^= (PAGE_SIZE / 2); 416 bi->page_offset ^= (PAGE_SIZE / 2);
417 } 417 }
418 418
419 bi->page_dma = pci_map_page(pdev, bi->page, 419 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
420 bi->page_offset, 420 bi->page_offset,
421 (PAGE_SIZE / 2), 421 (PAGE_SIZE / 2),
422 PCI_DMA_FROMDEVICE); 422 DMA_FROM_DEVICE);
423 } 423 }
424 424
425 skb = bi->skb; 425 skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
442 bi->skb = skb; 442 bi->skb = skb;
443 } 443 }
444 if (!bi->dma) { 444 if (!bi->dma) {
445 bi->dma = pci_map_single(pdev, skb->data, 445 bi->dma = dma_map_single(&pdev->dev, skb->data,
446 rx_ring->rx_buf_len, 446 rx_ring->rx_buf_len,
447 PCI_DMA_FROMDEVICE); 447 DMA_FROM_DEVICE);
448 } 448 }
449 /* Refresh the desc even if buffer_addrs didn't change because 449 /* Refresh the desc even if buffer_addrs didn't change because
450 * each write-back erases this info. */ 450 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
536 rx_buffer_info->skb = NULL; 536 rx_buffer_info->skb = NULL;
537 537
538 if (rx_buffer_info->dma) { 538 if (rx_buffer_info->dma) {
539 pci_unmap_single(pdev, rx_buffer_info->dma, 539 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
540 rx_ring->rx_buf_len, 540 rx_ring->rx_buf_len,
541 PCI_DMA_FROMDEVICE); 541 DMA_FROM_DEVICE);
542 rx_buffer_info->dma = 0; 542 rx_buffer_info->dma = 0;
543 skb_put(skb, len); 543 skb_put(skb, len);
544 } 544 }
545 545
546 if (upper_len) { 546 if (upper_len) {
547 pci_unmap_page(pdev, rx_buffer_info->page_dma, 547 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
548 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 548 PAGE_SIZE / 2, DMA_FROM_DEVICE);
549 rx_buffer_info->page_dma = 0; 549 rx_buffer_info->page_dma = 0;
550 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 550 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
551 rx_buffer_info->page, 551 rx_buffer_info->page,
@@ -961,12 +961,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
961 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); 961 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
962 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); 962 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
963 963
964 if (!hw->mbx.ops.check_for_ack(hw)) {
965 /*
966 * checking for the ack clears the PFACK bit. Place
967 * it back in the v2p_mailbox cache so that anyone
968 * polling for an ack will not miss it. Also
969 * avoid the read below because the code to read
970 * the mailbox will also clear the ack bit. This was
971 * causing lost acks. Just cache the bit and exit
972 * the IRQ handler.
973 */
974 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
975 goto out;
976 }
977
978 /* Not an ack interrupt, go ahead and read the message */
964 hw->mbx.ops.read(hw, &msg, 1); 979 hw->mbx.ops.read(hw, &msg, 1);
965 980
966 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 981 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
967 mod_timer(&adapter->watchdog_timer, 982 mod_timer(&adapter->watchdog_timer,
968 round_jiffies(jiffies + 1)); 983 round_jiffies(jiffies + 1));
969 984
985out:
970 return IRQ_HANDLED; 986 return IRQ_HANDLED;
971} 987}
972 988
@@ -1721,9 +1737,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1721 1737
1722 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1738 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1723 if (rx_buffer_info->dma) { 1739 if (rx_buffer_info->dma) {
1724 pci_unmap_single(pdev, rx_buffer_info->dma, 1740 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1725 rx_ring->rx_buf_len, 1741 rx_ring->rx_buf_len,
1726 PCI_DMA_FROMDEVICE); 1742 DMA_FROM_DEVICE);
1727 rx_buffer_info->dma = 0; 1743 rx_buffer_info->dma = 0;
1728 } 1744 }
1729 if (rx_buffer_info->skb) { 1745 if (rx_buffer_info->skb) {
@@ -1737,8 +1753,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1737 } 1753 }
1738 if (!rx_buffer_info->page) 1754 if (!rx_buffer_info->page)
1739 continue; 1755 continue;
1740 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, 1756 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
1741 PCI_DMA_FROMDEVICE); 1757 PAGE_SIZE / 2, DMA_FROM_DEVICE);
1742 rx_buffer_info->page_dma = 0; 1758 rx_buffer_info->page_dma = 0;
1743 put_page(rx_buffer_info->page); 1759 put_page(rx_buffer_info->page);
1744 rx_buffer_info->page = NULL; 1760 rx_buffer_info->page = NULL;
@@ -2445,7 +2461,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2445 vfree(tx_ring->tx_buffer_info); 2461 vfree(tx_ring->tx_buffer_info);
2446 tx_ring->tx_buffer_info = NULL; 2462 tx_ring->tx_buffer_info = NULL;
2447 2463
2448 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2464 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2465 tx_ring->dma);
2449 2466
2450 tx_ring->desc = NULL; 2467 tx_ring->desc = NULL;
2451} 2468}
@@ -2490,8 +2507,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2490 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2507 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2491 tx_ring->size = ALIGN(tx_ring->size, 4096); 2508 tx_ring->size = ALIGN(tx_ring->size, 4096);
2492 2509
2493 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 2510 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2494 &tx_ring->dma); 2511 &tx_ring->dma, GFP_KERNEL);
2495 if (!tx_ring->desc) 2512 if (!tx_ring->desc)
2496 goto err; 2513 goto err;
2497 2514
@@ -2561,8 +2578,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2561 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2578 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2562 rx_ring->size = ALIGN(rx_ring->size, 4096); 2579 rx_ring->size = ALIGN(rx_ring->size, 4096);
2563 2580
2564 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 2581 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2565 &rx_ring->dma); 2582 &rx_ring->dma, GFP_KERNEL);
2566 2583
2567 if (!rx_ring->desc) { 2584 if (!rx_ring->desc) {
2568 hw_dbg(&adapter->hw, 2585 hw_dbg(&adapter->hw,
@@ -2623,7 +2640,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2623 vfree(rx_ring->rx_buffer_info); 2640 vfree(rx_ring->rx_buffer_info);
2624 rx_ring->rx_buffer_info = NULL; 2641 rx_ring->rx_buffer_info = NULL;
2625 2642
2626 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2643 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2644 rx_ring->dma);
2627 2645
2628 rx_ring->desc = NULL; 2646 rx_ring->desc = NULL;
2629} 2647}
@@ -2935,10 +2953,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2935 2953
2936 tx_buffer_info->length = size; 2954 tx_buffer_info->length = size;
2937 tx_buffer_info->mapped_as_page = false; 2955 tx_buffer_info->mapped_as_page = false;
2938 tx_buffer_info->dma = pci_map_single(adapter->pdev, 2956 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2939 skb->data + offset, 2957 skb->data + offset,
2940 size, PCI_DMA_TODEVICE); 2958 size, DMA_TO_DEVICE);
2941 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 2959 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2942 goto dma_error; 2960 goto dma_error;
2943 tx_buffer_info->time_stamp = jiffies; 2961 tx_buffer_info->time_stamp = jiffies;
2944 tx_buffer_info->next_to_watch = i; 2962 tx_buffer_info->next_to_watch = i;
@@ -2964,13 +2982,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2964 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2982 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2965 2983
2966 tx_buffer_info->length = size; 2984 tx_buffer_info->length = size;
2967 tx_buffer_info->dma = pci_map_page(adapter->pdev, 2985 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
2968 frag->page, 2986 frag->page,
2969 offset, 2987 offset,
2970 size, 2988 size,
2971 PCI_DMA_TODEVICE); 2989 DMA_TO_DEVICE);
2972 tx_buffer_info->mapped_as_page = true; 2990 tx_buffer_info->mapped_as_page = true;
2973 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 2991 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2974 goto dma_error; 2992 goto dma_error;
2975 tx_buffer_info->time_stamp = jiffies; 2993 tx_buffer_info->time_stamp = jiffies;
2976 tx_buffer_info->next_to_watch = i; 2994 tx_buffer_info->next_to_watch = i;
@@ -3311,14 +3329,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3311 if (err) 3329 if (err)
3312 return err; 3330 return err;
3313 3331
3314 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3332 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3315 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3333 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3316 pci_using_dac = 1; 3334 pci_using_dac = 1;
3317 } else { 3335 } else {
3318 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3336 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3319 if (err) { 3337 if (err) {
3320 err = pci_set_consistent_dma_mask(pdev, 3338 err = dma_set_coherent_mask(&pdev->dev,
3321 DMA_BIT_MASK(32)); 3339 DMA_BIT_MASK(32));
3322 if (err) { 3340 if (err) {
3323 dev_err(&pdev->dev, "No usable DMA " 3341 dev_err(&pdev->dev, "No usable DMA "
3324 "configuration, aborting\n"); 3342 "configuration, aborting\n");
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 852e9c4fd934..f6f929958ba0 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
359 else 359 else
360 *link_up = false; 360 *link_up = false;
361 361
362 if (links_reg & IXGBE_LINKS_SPEED) 362 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
363 IXGBE_LINKS_SPEED_10G_82599)
363 *speed = IXGBE_LINK_SPEED_10GB_FULL; 364 *speed = IXGBE_LINK_SPEED_10GB_FULL;
364 else 365 else
365 *speed = IXGBE_LINK_SPEED_1GB_FULL; 366 *speed = IXGBE_LINK_SPEED_1GB_FULL;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index b91492f4e48a..f852ab3ae9cf 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * ks8842_main.c timberdale KS8842 ethernet driver 2 * ks8842.c timberdale KS8842 ethernet driver
3 * Copyright (c) 2009 Intel Corporation 3 * Copyright (c) 2009 Intel Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,7 @@
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/ks8842.h>
31 32
32#define DRV_NAME "ks8842" 33#define DRV_NAME "ks8842"
33 34
@@ -304,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
304 ks8842_write16(adapter, 39, mac, REG_MACAR3); 305 ks8842_write16(adapter, 39, mac, REG_MACAR3);
305} 306}
306 307
308static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
309{
310 unsigned long flags;
311 unsigned i;
312
313 spin_lock_irqsave(&adapter->lock, flags);
314 for (i = 0; i < ETH_ALEN; i++) {
315 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
316 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
317 REG_MACAR1 + i);
318 }
319 spin_unlock_irqrestore(&adapter->lock, flags);
320}
321
307static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) 322static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
308{ 323{
309 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 324 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -522,6 +537,8 @@ static int ks8842_open(struct net_device *netdev)
522 /* reset the HW */ 537 /* reset the HW */
523 ks8842_reset_hw(adapter); 538 ks8842_reset_hw(adapter);
524 539
540 ks8842_write_mac_addr(adapter, netdev->dev_addr);
541
525 ks8842_update_link_status(netdev, adapter); 542 ks8842_update_link_status(netdev, adapter);
526 543
527 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 544 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
@@ -568,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
568static int ks8842_set_mac(struct net_device *netdev, void *p) 585static int ks8842_set_mac(struct net_device *netdev, void *p)
569{ 586{
570 struct ks8842_adapter *adapter = netdev_priv(netdev); 587 struct ks8842_adapter *adapter = netdev_priv(netdev);
571 unsigned long flags;
572 struct sockaddr *addr = p; 588 struct sockaddr *addr = p;
573 char *mac = (u8 *)addr->sa_data; 589 char *mac = (u8 *)addr->sa_data;
574 int i;
575 590
576 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 591 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
577 592
@@ -580,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
580 595
581 memcpy(netdev->dev_addr, mac, netdev->addr_len); 596 memcpy(netdev->dev_addr, mac, netdev->addr_len);
582 597
583 spin_lock_irqsave(&adapter->lock, flags); 598 ks8842_write_mac_addr(adapter, mac);
584 for (i = 0; i < ETH_ALEN; i++) {
585 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
586 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
587 REG_MACAR1 + i);
588 }
589 spin_unlock_irqrestore(&adapter->lock, flags);
590 return 0; 599 return 0;
591} 600}
592 601
@@ -605,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
605 614
606 ks8842_reset_hw(adapter); 615 ks8842_reset_hw(adapter);
607 616
617 ks8842_write_mac_addr(adapter, netdev->dev_addr);
618
608 ks8842_update_link_status(netdev, adapter); 619 ks8842_update_link_status(netdev, adapter);
609} 620}
610 621
@@ -627,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
627 struct resource *iomem; 638 struct resource *iomem;
628 struct net_device *netdev; 639 struct net_device *netdev;
629 struct ks8842_adapter *adapter; 640 struct ks8842_adapter *adapter;
641 struct ks8842_platform_data *pdata = pdev->dev.platform_data;
630 u16 id; 642 u16 id;
643 unsigned i;
631 644
632 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 645 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
633 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) 646 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -658,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
658 netdev->netdev_ops = &ks8842_netdev_ops; 671 netdev->netdev_ops = &ks8842_netdev_ops;
659 netdev->ethtool_ops = &ks8842_ethtool_ops; 672 netdev->ethtool_ops = &ks8842_ethtool_ops;
660 673
661 ks8842_read_mac_addr(adapter, netdev->dev_addr); 674 /* Check if a mac address was given */
675 i = netdev->addr_len;
676 if (pdata) {
677 for (i = 0; i < netdev->addr_len; i++)
678 if (pdata->macaddr[i] != 0)
679 break;
680
681 if (i < netdev->addr_len)
682 /* an address was passed, use it */
683 memcpy(netdev->dev_addr, pdata->macaddr,
684 netdev->addr_len);
685 }
686
687 if (i == netdev->addr_len) {
688 ks8842_read_mac_addr(adapter, netdev->dev_addr);
689
690 if (!is_valid_ether_addr(netdev->dev_addr))
691 random_ether_addr(netdev->dev_addr);
692 }
662 693
663 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); 694 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
664 695
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 4dcd61f81ec2..b4fb07a6f13f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -78,7 +78,9 @@ union ks8851_tx_hdr {
78 * @msg_enable: The message flags controlling driver output (see ethtool). 78 * @msg_enable: The message flags controlling driver output (see ethtool).
79 * @fid: Incrementing frame id tag. 79 * @fid: Incrementing frame id tag.
80 * @rc_ier: Cached copy of KS_IER. 80 * @rc_ier: Cached copy of KS_IER.
81 * @rc_ccr: Cached copy of KS_CCR.
81 * @rc_rxqcr: Cached copy of KS_RXQCR. 82 * @rc_rxqcr: Cached copy of KS_RXQCR.
83 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
82 * 84 *
83 * The @lock ensures that the chip is protected when certain operations are 85 * The @lock ensures that the chip is protected when certain operations are
84 * in progress. When the read or write packet transfer is in progress, most 86 * in progress. When the read or write packet transfer is in progress, most
@@ -109,6 +111,8 @@ struct ks8851_net {
109 111
110 u16 rc_ier; 112 u16 rc_ier;
111 u16 rc_rxqcr; 113 u16 rc_rxqcr;
114 u16 rc_ccr;
115 u16 eeprom_size;
112 116
113 struct mii_if_info mii; 117 struct mii_if_info mii;
114 struct ks8851_rxctrl rxctrl; 118 struct ks8851_rxctrl rxctrl;
@@ -717,12 +721,14 @@ static void ks8851_tx_work(struct work_struct *work)
717 txb = skb_dequeue(&ks->txq); 721 txb = skb_dequeue(&ks->txq);
718 last = skb_queue_empty(&ks->txq); 722 last = skb_queue_empty(&ks->txq);
719 723
720 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); 724 if (txb != NULL) {
721 ks8851_wrpkt(ks, txb, last); 725 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
722 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 726 ks8851_wrpkt(ks, txb, last);
723 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); 727 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
728 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
724 729
725 ks8851_done_tx(ks, txb); 730 ks8851_done_tx(ks, txb);
731 }
726 } 732 }
727 733
728 mutex_unlock(&ks->lock); 734 mutex_unlock(&ks->lock);
@@ -1028,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
1028 .ndo_validate_addr = eth_validate_addr, 1034 .ndo_validate_addr = eth_validate_addr,
1029}; 1035};
1030 1036
1037/* Companion eeprom access */
1038
1039enum { /* EEPROM programming states */
1040 EEPROM_CONTROL,
1041 EEPROM_ADDRESS,
1042 EEPROM_DATA,
1043 EEPROM_COMPLETE
1044};
1045
1046/**
1047 * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
1048 * @dev: The network device the PHY is on.
1049 * @addr: EEPROM address to read
1050 *
1051 * eeprom_size: used to define the data coding length. Can be changed
1052 * through debug-fs.
1053 *
1054 * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
1055 * Warning: The READ feature is not supported on ks8851 revision 0.
1056 *
1057 * Rough programming model:
1058 * - on period start: set clock high and read value on bus
1059 * - on period / 2: set clock low and program value on bus
1060 * - start on period / 2
1061 */
1062unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
1063{
1064 struct ks8851_net *ks = netdev_priv(dev);
1065 int eepcr;
1066 int ctrl = EEPROM_OP_READ;
1067 int state = EEPROM_CONTROL;
1068 int bit_count = EEPROM_OP_LEN - 1;
1069 unsigned int data = 0;
1070 int dummy;
1071 unsigned int addr_len;
1072
1073 addr_len = (ks->eeprom_size == 128) ? 6 : 8;
1074
1075 /* start transaction: chip select high, authorize write */
1076 mutex_lock(&ks->lock);
1077 eepcr = EEPCR_EESA | EEPCR_EESRWA;
1078 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1079 eepcr |= EEPCR_EECS;
1080 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1081 mutex_unlock(&ks->lock);
1082
1083 while (state != EEPROM_COMPLETE) {
1084 /* falling clock period starts... */
1085 /* set EED_IO pin for control and address */
1086 eepcr &= ~EEPCR_EEDO;
1087 switch (state) {
1088 case EEPROM_CONTROL:
1089 eepcr |= ((ctrl >> bit_count) & 1) << 2;
1090 if (bit_count-- <= 0) {
1091 bit_count = addr_len - 1;
1092 state = EEPROM_ADDRESS;
1093 }
1094 break;
1095 case EEPROM_ADDRESS:
1096 eepcr |= ((addr >> bit_count) & 1) << 2;
1097 bit_count--;
1098 break;
1099 case EEPROM_DATA:
1100 /* Change to receive mode */
1101 eepcr &= ~EEPCR_EESRWA;
1102 break;
1103 }
1104
1105 /* lower clock */
1106 eepcr &= ~EEPCR_EESCK;
1107
1108 mutex_lock(&ks->lock);
1109 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1110 mutex_unlock(&ks->lock);
1111
1112 /* waitread period / 2 */
1113 udelay(EEPROM_SK_PERIOD / 2);
1114
1115 /* rising clock period starts... */
1116
1117 /* raise clock */
1118 mutex_lock(&ks->lock);
1119 eepcr |= EEPCR_EESCK;
1120 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1121 mutex_unlock(&ks->lock);
1122
1123 /* Manage read */
1124 switch (state) {
1125 case EEPROM_ADDRESS:
1126 if (bit_count < 0) {
1127 bit_count = EEPROM_DATA_LEN - 1;
1128 state = EEPROM_DATA;
1129 }
1130 break;
1131 case EEPROM_DATA:
1132 mutex_lock(&ks->lock);
1133 dummy = ks8851_rdreg16(ks, KS_EEPCR);
1134 mutex_unlock(&ks->lock);
1135 data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
1136 if (bit_count-- <= 0)
1137 state = EEPROM_COMPLETE;
1138 break;
1139 }
1140
1141 /* wait period / 2 */
1142 udelay(EEPROM_SK_PERIOD / 2);
1143 }
1144
1145 /* close transaction */
1146 mutex_lock(&ks->lock);
1147 eepcr &= ~EEPCR_EECS;
1148 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1149 eepcr = 0;
1150 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1151 mutex_unlock(&ks->lock);
1152
1153 return data;
1154}
1155
1156/**
1157 * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
1158 * @dev: The network device the PHY is on.
1159 * @op: operand (can be WRITE, EWEN, EWDS)
1160 * @addr: EEPROM address to write
1161 * @data: data to write
1162 *
1163 * eeprom_size: used to define the data coding length. Can be changed
1164 * through debug-fs.
1165 *
1166 * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
1167 *
1168 * Note that a write enable is required before writing data.
1169 *
1170 * Rough programming model:
1171 * - on period start: set clock high
1172 * - on period / 2: set clock low and program value on bus
1173 * - start on period / 2
1174 */
1175void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
1176 unsigned int addr, unsigned int data)
1177{
1178 struct ks8851_net *ks = netdev_priv(dev);
1179 int eepcr;
1180 int state = EEPROM_CONTROL;
1181 int bit_count = EEPROM_OP_LEN - 1;
1182 unsigned int addr_len;
1183
1184 addr_len = (ks->eeprom_size == 128) ? 6 : 8;
1185
1186 switch (op) {
1187 case EEPROM_OP_EWEN:
1188 addr = 0x30;
1189 break;
1190 case EEPROM_OP_EWDS:
1191 addr = 0;
1192 break;
1193 }
1194
1195 /* start transaction: chip select high, authorize write */
1196 mutex_lock(&ks->lock);
1197 eepcr = EEPCR_EESA | EEPCR_EESRWA;
1198 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1199 eepcr |= EEPCR_EECS;
1200 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1201 mutex_unlock(&ks->lock);
1202
1203 while (state != EEPROM_COMPLETE) {
1204 /* falling clock period starts... */
1205 /* set EED_IO pin for control and address */
1206 eepcr &= ~EEPCR_EEDO;
1207 switch (state) {
1208 case EEPROM_CONTROL:
1209 eepcr |= ((op >> bit_count) & 1) << 2;
1210 if (bit_count-- <= 0) {
1211 bit_count = addr_len - 1;
1212 state = EEPROM_ADDRESS;
1213 }
1214 break;
1215 case EEPROM_ADDRESS:
1216 eepcr |= ((addr >> bit_count) & 1) << 2;
1217 if (bit_count-- <= 0) {
1218 if (op == EEPROM_OP_WRITE) {
1219 bit_count = EEPROM_DATA_LEN - 1;
1220 state = EEPROM_DATA;
1221 } else {
1222 state = EEPROM_COMPLETE;
1223 }
1224 }
1225 break;
1226 case EEPROM_DATA:
1227 eepcr |= ((data >> bit_count) & 1) << 2;
1228 if (bit_count-- <= 0)
1229 state = EEPROM_COMPLETE;
1230 break;
1231 }
1232
1233 /* lower clock */
1234 eepcr &= ~EEPCR_EESCK;
1235
1236 mutex_lock(&ks->lock);
1237 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1238 mutex_unlock(&ks->lock);
1239
1240 /* wait period / 2 */
1241 udelay(EEPROM_SK_PERIOD / 2);
1242
1243 /* rising clock period starts... */
1244
1245 /* raise clock */
1246 eepcr |= EEPCR_EESCK;
1247 mutex_lock(&ks->lock);
1248 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1249 mutex_unlock(&ks->lock);
1250
1251 /* wait period / 2 */
1252 udelay(EEPROM_SK_PERIOD / 2);
1253 }
1254
1255 /* close transaction */
1256 mutex_lock(&ks->lock);
1257 eepcr &= ~EEPCR_EECS;
1258 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1259 eepcr = 0;
1260 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1261 mutex_unlock(&ks->lock);
1262
1263}
1264
1031/* ethtool support */ 1265/* ethtool support */
1032 1266
1033static void ks8851_get_drvinfo(struct net_device *dev, 1267static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1074,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
1074 return mii_nway_restart(&ks->mii); 1308 return mii_nway_restart(&ks->mii);
1075} 1309}
1076 1310
1311static int ks8851_get_eeprom_len(struct net_device *dev)
1312{
1313 struct ks8851_net *ks = netdev_priv(dev);
1314 return ks->eeprom_size;
1315}
1316
1317static int ks8851_get_eeprom(struct net_device *dev,
1318 struct ethtool_eeprom *eeprom, u8 *bytes)
1319{
1320 struct ks8851_net *ks = netdev_priv(dev);
1321 u16 *eeprom_buff;
1322 int first_word;
1323 int last_word;
1324 int ret_val = 0;
1325 u16 i;
1326
1327 if (eeprom->len == 0)
1328 return -EINVAL;
1329
1330 if (eeprom->len > ks->eeprom_size)
1331 return -EINVAL;
1332
1333 eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
1334
1335 first_word = eeprom->offset >> 1;
1336 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1337
1338 eeprom_buff = kmalloc(sizeof(u16) *
1339 (last_word - first_word + 1), GFP_KERNEL);
1340 if (!eeprom_buff)
1341 return -ENOMEM;
1342
1343 for (i = 0; i < last_word - first_word + 1; i++)
1344 eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
1345
1346 /* Device's eeprom is little-endian, word addressable */
1347 for (i = 0; i < last_word - first_word + 1; i++)
1348 le16_to_cpus(&eeprom_buff[i]);
1349
1350 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1351 kfree(eeprom_buff);
1352
1353 return ret_val;
1354}
1355
1356static int ks8851_set_eeprom(struct net_device *dev,
1357 struct ethtool_eeprom *eeprom, u8 *bytes)
1358{
1359 struct ks8851_net *ks = netdev_priv(dev);
1360 u16 *eeprom_buff;
1361 void *ptr;
1362 int max_len;
1363 int first_word;
1364 int last_word;
1365 int ret_val = 0;
1366 u16 i;
1367
1368 if (eeprom->len == 0)
1369 return -EOPNOTSUPP;
1370
1371 if (eeprom->len > ks->eeprom_size)
1372 return -EINVAL;
1373
1374 if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
1375 return -EFAULT;
1376
1377 first_word = eeprom->offset >> 1;
1378 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1379 max_len = (last_word - first_word + 1) * 2;
1380 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1381 if (!eeprom_buff)
1382 return -ENOMEM;
1383
1384 ptr = (void *)eeprom_buff;
1385
1386 if (eeprom->offset & 1) {
1387 /* need read/modify/write of first changed EEPROM word */
1388 /* only the second byte of the word is being modified */
1389 eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
1390 ptr++;
1391 }
1392 if ((eeprom->offset + eeprom->len) & 1)
1393 /* need read/modify/write of last changed EEPROM word */
1394 /* only the first byte of the word is being modified */
1395 eeprom_buff[last_word - first_word] =
1396 ks8851_eeprom_read(dev, last_word);
1397
1398
1399 /* Device's eeprom is little-endian, word addressable */
1400 le16_to_cpus(&eeprom_buff[0]);
1401 le16_to_cpus(&eeprom_buff[last_word - first_word]);
1402
1403 memcpy(ptr, bytes, eeprom->len);
1404
1405 for (i = 0; i < last_word - first_word + 1; i++)
1406 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
1407
1408 ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
1409
1410 for (i = 0; i < last_word - first_word + 1; i++) {
1411 ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
1412 eeprom_buff[i]);
1413 mdelay(EEPROM_WRITE_TIME);
1414 }
1415
1416 ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
1417
1418 kfree(eeprom_buff);
1419 return ret_val;
1420}
1421
1077static const struct ethtool_ops ks8851_ethtool_ops = { 1422static const struct ethtool_ops ks8851_ethtool_ops = {
1078 .get_drvinfo = ks8851_get_drvinfo, 1423 .get_drvinfo = ks8851_get_drvinfo,
1079 .get_msglevel = ks8851_get_msglevel, 1424 .get_msglevel = ks8851_get_msglevel,
@@ -1082,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
1082 .set_settings = ks8851_set_settings, 1427 .set_settings = ks8851_set_settings,
1083 .get_link = ks8851_get_link, 1428 .get_link = ks8851_get_link,
1084 .nway_reset = ks8851_nway_reset, 1429 .nway_reset = ks8851_nway_reset,
1430 .get_eeprom_len = ks8851_get_eeprom_len,
1431 .get_eeprom = ks8851_get_eeprom,
1432 .set_eeprom = ks8851_set_eeprom,
1085}; 1433};
1086 1434
1087/* MII interface controls */ 1435/* MII interface controls */
@@ -1267,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1267 goto err_id; 1615 goto err_id;
1268 } 1616 }
1269 1617
1618 /* cache the contents of the CCR register for EEPROM, etc. */
1619 ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
1620
1621 if (ks->rc_ccr & CCR_EEPROM)
1622 ks->eeprom_size = 128;
1623 else
1624 ks->eeprom_size = 0;
1625
1270 ks8851_read_selftest(ks); 1626 ks8851_read_selftest(ks);
1271 ks8851_init_mac(ks); 1627 ks8851_init_mac(ks);
1272 1628
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312cc356..537fb06e5932 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
25#define OBCR_ODS_16mA (1 << 6) 25#define OBCR_ODS_16mA (1 << 6)
26 26
27#define KS_EEPCR 0x22 27#define KS_EEPCR 0x22
28#define EEPCR_EESRWA (1 << 5)
28#define EEPCR_EESA (1 << 4) 29#define EEPCR_EESA (1 << 4)
29#define EEPCR_EESB (1 << 3) 30#define EEPCR_EESB_OFFSET 3
31#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
30#define EEPCR_EEDO (1 << 2) 32#define EEPCR_EEDO (1 << 2)
31#define EEPCR_EESCK (1 << 1) 33#define EEPCR_EESCK (1 << 1)
32#define EEPCR_EECS (1 << 0) 34#define EEPCR_EECS (1 << 0)
33 35
36#define EEPROM_OP_LEN 3 /* bits:*/
37#define EEPROM_OP_READ 0x06
38#define EEPROM_OP_EWEN 0x04
39#define EEPROM_OP_WRITE 0x05
40#define EEPROM_OP_EWDS 0x14
41
42#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
43#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
44#define EEPROM_SK_PERIOD 400 /* in us */
45
34#define KS_MBIR 0x24 46#define KS_MBIR 0x24
35#define MBIR_TXMBF (1 << 12) 47#define MBIR_TXMBF (1 << 12)
36#define MBIR_TXMBFA (1 << 11) 48#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 526dc9cbc3c6..770b606a9e3a 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -445,14 +445,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
445 445
446 if (ei_local->irqlock) 446 if (ei_local->irqlock)
447 { 447 {
448#if 1 /* This might just be an interrupt for a PCI device sharing this line */ 448 /*
449 /* The "irqlock" check is only for testing. */ 449 * This might just be an interrupt for a PCI device sharing
450 printk(ei_local->irqlock 450 * this line
451 ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" 451 */
452 : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", 452 printk("%s: Interrupted while interrupts are masked!"
453 " isr=%#2x imr=%#2x.\n",
453 dev->name, ei_inb_p(e8390_base + EN0_ISR), 454 dev->name, ei_inb_p(e8390_base + EN0_ISR),
454 ei_inb_p(e8390_base + EN0_IMR)); 455 ei_inb_p(e8390_base + EN0_IMR));
455#endif
456 spin_unlock(&ei_local->page_lock); 456 spin_unlock(&ei_local->page_lock);
457 return IRQ_NONE; 457 return IRQ_NONE;
458 } 458 }
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f12..a8a94e2f6ddc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
37struct macvtap_queue { 37struct macvtap_queue {
38 struct sock sk; 38 struct sock sk;
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq;
41 int vnet_hdr_sz;
40 struct macvlan_dev *vlan; 42 struct macvlan_dev *vlan;
41 struct file *file; 43 struct file *file;
42 unsigned int flags; 44 unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
181 return -ENOLINK; 183 return -ENOLINK;
182 184
183 skb_queue_tail(&q->sk.sk_receive_queue, skb); 185 skb_queue_tail(&q->sk.sk_receive_queue, skb);
184 wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND); 186 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
185 return 0; 187 return 0;
186} 188}
187 189
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
242 244
243static void macvtap_sock_write_space(struct sock *sk) 245static void macvtap_sock_write_space(struct sock *sk)
244{ 246{
247 wait_queue_head_t *wqueue;
248
245 if (!sock_writeable(sk) || 249 if (!sock_writeable(sk) ||
246 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 250 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
247 return; 251 return;
248 252
249 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 253 wqueue = sk_sleep(sk);
250 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND); 254 if (wqueue && waitqueue_active(wqueue))
255 wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
251} 256}
252 257
253static int macvtap_open(struct inode *inode, struct file *file) 258static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
272 if (!q) 277 if (!q)
273 goto out; 278 goto out;
274 279
275 init_waitqueue_head(&q->sock.wait); 280 q->sock.wq = &q->wq;
281 init_waitqueue_head(&q->wq.wait);
276 q->sock.type = SOCK_RAW; 282 q->sock.type = SOCK_RAW;
277 q->sock.state = SS_CONNECTED; 283 q->sock.state = SS_CONNECTED;
278 q->sock.file = file; 284 q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
280 sock_init_data(&q->sock, &q->sk); 286 sock_init_data(&q->sock, &q->sk);
281 q->sk.sk_write_space = macvtap_sock_write_space; 287 q->sk.sk_write_space = macvtap_sock_write_space;
282 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; 288 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
289 q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
283 290
284 err = macvtap_set_queue(dev, file, q); 291 err = macvtap_set_queue(dev, file, q);
285 if (err) 292 if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
308 goto out; 315 goto out;
309 316
310 mask = 0; 317 mask = 0;
311 poll_wait(file, &q->sock.wait, wait); 318 poll_wait(file, &q->wq.wait, wait);
312 319
313 if (!skb_queue_empty(&q->sk.sk_receive_queue)) 320 if (!skb_queue_empty(&q->sk.sk_receive_queue))
314 mask |= POLLIN | POLLRDNORM; 321 mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
440 int vnet_hdr_len = 0; 447 int vnet_hdr_len = 0;
441 448
442 if (q->flags & IFF_VNET_HDR) { 449 if (q->flags & IFF_VNET_HDR) {
443 vnet_hdr_len = sizeof(vnet_hdr); 450 vnet_hdr_len = q->vnet_hdr_sz;
444 451
445 err = -EINVAL; 452 err = -EINVAL;
446 if ((len -= vnet_hdr_len) < 0) 453 if ((len -= vnet_hdr_len) < 0)
447 goto err; 454 goto err;
448 455
449 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 456 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
450 vnet_hdr_len); 457 sizeof(vnet_hdr));
451 if (err < 0) 458 if (err < 0)
452 goto err; 459 goto err;
453 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 460 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
529 536
530 if (q->flags & IFF_VNET_HDR) { 537 if (q->flags & IFF_VNET_HDR) {
531 struct virtio_net_hdr vnet_hdr; 538 struct virtio_net_hdr vnet_hdr;
532 vnet_hdr_len = sizeof (vnet_hdr); 539 vnet_hdr_len = q->vnet_hdr_sz;
533 if ((len -= vnet_hdr_len) < 0) 540 if ((len -= vnet_hdr_len) < 0)
534 return -EINVAL; 541 return -EINVAL;
535 542
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
537 if (ret) 544 if (ret)
538 return ret; 545 return ret;
539 546
540 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len)) 547 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
541 return -EFAULT; 548 return -EFAULT;
542 } 549 }
543 550
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
562 struct sk_buff *skb; 569 struct sk_buff *skb;
563 ssize_t ret = 0; 570 ssize_t ret = 0;
564 571
565 add_wait_queue(q->sk.sk_sleep, &wait); 572 add_wait_queue(sk_sleep(&q->sk), &wait);
566 while (len) { 573 while (len) {
567 current->state = TASK_INTERRUPTIBLE; 574 current->state = TASK_INTERRUPTIBLE;
568 575
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
587 } 594 }
588 595
589 current->state = TASK_RUNNING; 596 current->state = TASK_RUNNING;
590 remove_wait_queue(q->sk.sk_sleep, &wait); 597 remove_wait_queue(sk_sleep(&q->sk), &wait);
591 return ret; 598 return ret;
592} 599}
593 600
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
622 struct ifreq __user *ifr = argp; 629 struct ifreq __user *ifr = argp;
623 unsigned int __user *up = argp; 630 unsigned int __user *up = argp;
624 unsigned int u; 631 unsigned int u;
632 int __user *sp = argp;
633 int s;
625 int ret; 634 int ret;
626 635
627 switch (cmd) { 636 switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
667 q->sk.sk_sndbuf = u; 676 q->sk.sk_sndbuf = u;
668 return 0; 677 return 0;
669 678
679 case TUNGETVNETHDRSZ:
680 s = q->vnet_hdr_sz;
681 if (put_user(s, sp))
682 return -EFAULT;
683 return 0;
684
685 case TUNSETVNETHDRSZ:
686 if (get_user(s, sp))
687 return -EFAULT;
688 if (s < (int)sizeof(struct virtio_net_hdr))
689 return -EINVAL;
690
691 q->vnet_hdr_sz = s;
692 return 0;
693
670 case TUNSETOFFLOAD: 694 case TUNSETOFFLOAD:
671 /* let the user check for future flags */ 695 /* let the user check for future flags */
672 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 696 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea35574..ca142c47b2e4 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
665 struct netconsole_target *nt; 665 struct netconsole_target *nt;
666 struct net_device *dev = ptr; 666 struct net_device *dev = ptr;
667 667
668 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER)) 668 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
669 event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
669 goto done; 670 goto done;
670 671
671 spin_lock_irqsave(&target_list_lock, flags); 672 spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
677 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); 678 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
678 break; 679 break;
679 case NETDEV_UNREGISTER: 680 case NETDEV_UNREGISTER:
680 if (!nt->enabled)
681 break;
682 netpoll_cleanup(&nt->np); 681 netpoll_cleanup(&nt->np);
682 /* Fall through */
683 case NETDEV_GOING_DOWN:
684 case NETDEV_BONDING_DESLAVE:
683 nt->enabled = 0; 685 nt->enabled = 0;
684 printk(KERN_INFO "netconsole: network logging stopped"
685 ", interface %s unregistered\n",
686 dev->name);
687 break; 686 break;
688 } 687 }
689 } 688 }
690 netconsole_target_put(nt); 689 netconsole_target_put(nt);
691 } 690 }
692 spin_unlock_irqrestore(&target_list_lock, flags); 691 spin_unlock_irqrestore(&target_list_lock, flags);
692 if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
693 printk(KERN_INFO "netconsole: network logging stopped, "
694 "interface %s %s\n", dev->name,
695 event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
693 696
694done: 697done:
695 return NOTIFY_DONE; 698 return NOTIFY_DONE;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ef940226ee22..30abb4e436f1 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
36#include "niu.h" 36#include "niu.h"
37 37
38#define DRV_MODULE_NAME "niu" 38#define DRV_MODULE_NAME "niu"
39#define DRV_MODULE_VERSION "1.0" 39#define DRV_MODULE_VERSION "1.1"
40#define DRV_MODULE_RELDATE "Nov 14, 2008" 40#define DRV_MODULE_RELDATE "Apr 22, 2010"
41 41
42static char version[] __devinitdata = 42static char version[] __devinitdata =
43 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 43 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3444 struct rx_ring_info *rp) 3444 struct rx_ring_info *rp)
3445{ 3445{
3446 unsigned int index = rp->rcr_index; 3446 unsigned int index = rp->rcr_index;
3447 struct rx_pkt_hdr1 *rh;
3447 struct sk_buff *skb; 3448 struct sk_buff *skb;
3448 int len, num_rcr; 3449 int len, num_rcr;
3449 3450
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3477 if (num_rcr == 1) { 3478 if (num_rcr == 1) {
3478 int ptype; 3479 int ptype;
3479 3480
3480 off += 2;
3481 append_size -= 2;
3482
3483 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3481 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3484 if ((ptype == RCR_PKT_TYPE_TCP || 3482 if ((ptype == RCR_PKT_TYPE_TCP ||
3485 ptype == RCR_PKT_TYPE_UDP) && 3483 ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3488 skb->ip_summed = CHECKSUM_UNNECESSARY; 3486 skb->ip_summed = CHECKSUM_UNNECESSARY;
3489 else 3487 else
3490 skb->ip_summed = CHECKSUM_NONE; 3488 skb->ip_summed = CHECKSUM_NONE;
3491 } 3489 } else if (!(val & RCR_ENTRY_MULTI))
3492 if (!(val & RCR_ENTRY_MULTI))
3493 append_size = len - skb->len; 3490 append_size = len - skb->len;
3494 3491
3495 niu_rx_skb_append(skb, page, off, append_size); 3492 niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3510 } 3507 }
3511 rp->rcr_index = index; 3508 rp->rcr_index = index;
3512 3509
3513 skb_reserve(skb, NET_IP_ALIGN); 3510 len += sizeof(*rh);
3514 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN)); 3511 len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3512 __pskb_pull_tail(skb, len);
3513
3514 rh = (struct rx_pkt_hdr1 *) skb->data;
3515 if (np->dev->features & NETIF_F_RXHASH)
3516 skb->rxhash = ((u32)rh->hashval2_0 << 24 |
3517 (u32)rh->hashval2_1 << 16 |
3518 (u32)rh->hashval1_1 << 8 |
3519 (u32)rh->hashval1_2 << 0);
3520 skb_pull(skb, sizeof(*rh));
3515 3521
3516 rp->rx_packets++; 3522 rp->rx_packets++;
3517 rp->rx_bytes += skb->len; 3523 rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4946 RX_DMA_CTL_STAT_RCRTO | 4952 RX_DMA_CTL_STAT_RCRTO |
4947 RX_DMA_CTL_STAT_RBR_EMPTY)); 4953 RX_DMA_CTL_STAT_RBR_EMPTY));
4948 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4954 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4949 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); 4955 nw64(RXDMA_CFIG2(channel),
4956 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4957 RXDMA_CFIG2_FULL_HDR));
4950 nw64(RBR_CFIG_A(channel), 4958 nw64(RBR_CFIG_A(channel),
4951 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4959 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4952 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4960 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -7910,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
7910 return 0; 7918 return 0;
7911} 7919}
7912 7920
7921static int niu_set_flags(struct net_device *dev, u32 data)
7922{
7923 if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
7924 return -EOPNOTSUPP;
7925
7926 if (data & ETH_FLAG_RXHASH)
7927 dev->features |= NETIF_F_RXHASH;
7928 else
7929 dev->features &= ~NETIF_F_RXHASH;
7930 return 0;
7931}
7932
7913static const struct ethtool_ops niu_ethtool_ops = { 7933static const struct ethtool_ops niu_ethtool_ops = {
7914 .get_drvinfo = niu_get_drvinfo, 7934 .get_drvinfo = niu_get_drvinfo,
7915 .get_link = ethtool_op_get_link, 7935 .get_link = ethtool_op_get_link,
@@ -7926,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
7926 .phys_id = niu_phys_id, 7946 .phys_id = niu_phys_id,
7927 .get_rxnfc = niu_get_nfc, 7947 .get_rxnfc = niu_get_nfc,
7928 .set_rxnfc = niu_set_nfc, 7948 .set_rxnfc = niu_set_nfc,
7949 .set_flags = niu_set_flags,
7950 .get_flags = ethtool_op_get_flags,
7929}; 7951};
7930 7952
7931static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7953static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9754,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
9754 } 9776 }
9755} 9777}
9756 9778
9779static void __devinit niu_set_basic_features(struct net_device *dev)
9780{
9781 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
9782 NETIF_F_GRO | NETIF_F_RXHASH);
9783}
9784
9757static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9785static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9758 const struct pci_device_id *ent) 9786 const struct pci_device_id *ent)
9759{ 9787{
@@ -9838,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9838 } 9866 }
9839 } 9867 }
9840 9868
9841 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9869 niu_set_basic_features(dev);
9842 9870
9843 np->regs = pci_ioremap_bar(pdev, 0); 9871 np->regs = pci_ioremap_bar(pdev, 0);
9844 if (!np->regs) { 9872 if (!np->regs) {
@@ -10080,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10080 goto err_out_free_dev; 10108 goto err_out_free_dev;
10081 } 10109 }
10082 10110
10083 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 10111 niu_set_basic_features(dev);
10084 10112
10085 np->regs = of_ioremap(&op->resource[1], 0, 10113 np->regs = of_ioremap(&op->resource[1], 0,
10086 resource_size(&op->resource[1]), 10114 resource_size(&op->resource[1]),
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b5933d59..d6715465f35d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
2706#if defined(__LITTLE_ENDIAN_BITFIELD) 2706#if defined(__LITTLE_ENDIAN_BITFIELD)
2707 u8 inputport:2, 2707 u8 inputport:2,
2708 maccheck:1, 2708 maccheck:1,
2709 class:4; 2709 class:5;
2710 u8 vlan:1, 2710 u8 vlan:1,
2711 llcsnap:1, 2711 llcsnap:1,
2712 noport:1, 2712 noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
2715 tres:2, 2715 tres:2,
2716 tzfvld:1; 2716 tzfvld:1;
2717#elif defined(__BIG_ENDIAN_BITFIELD) 2717#elif defined(__BIG_ENDIAN_BITFIELD)
2718 u8 class:4, 2718 u8 class:5,
2719 maccheck:1, 2719 maccheck:1,
2720 inputport:2; 2720 inputport:2;
2721 u8 tzfvld:1, 2721 u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
2775 /* Bits 7:0 of hash value, H1. */ 2775 /* Bits 7:0 of hash value, H1. */
2776 u8 hashval1_2; 2776 u8 hashval1_2;
2777 2777
2778 u8 hwrsvd5;
2779 u8 hwrsvd6;
2780
2778 u8 usrdata_0; /* Bits 39:32 of user data. */ 2781 u8 usrdata_0; /* Bits 39:32 of user data. */
2779 u8 usrdata_1; /* Bits 31:24 of user data. */ 2782 u8 usrdata_1; /* Bits 31:24 of user data. */
2780 u8 usrdata_2; /* Bits 23:16 of user data. */ 2783 u8 usrdata_2; /* Bits 23:16 of user data. */
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 6b1d443f2ce5..392470333174 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
189 189
190 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); 190 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
191 while (mix_orcnt.s.orcnt) { 191 while (mix_orcnt.s.orcnt) {
192 spin_lock_irqsave(&p->tx_list.lock, flags);
193
194 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
195
196 if (mix_orcnt.s.orcnt == 0) {
197 spin_unlock_irqrestore(&p->tx_list.lock, flags);
198 break;
199 }
200
192 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, 201 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
193 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), 202 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
194 DMA_BIDIRECTIONAL); 203 DMA_BIDIRECTIONAL);
195 204
196 spin_lock_irqsave(&p->tx_list.lock, flags);
197
198 re.d64 = p->tx_ring[p->tx_next_clean]; 205 re.d64 = p->tx_ring[p->tx_next_clean];
199 p->tx_next_clean = 206 p->tx_next_clean =
200 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; 207 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -373,7 +380,6 @@ done:
373 mix_ircnt.s.ircnt = 1; 380 mix_ircnt.s.ircnt = 1;
374 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); 381 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
375 return rc; 382 return rc;
376
377} 383}
378 384
379static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) 385static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -383,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
383 union cvmx_mixx_ircnt mix_ircnt; 389 union cvmx_mixx_ircnt mix_ircnt;
384 int rc; 390 int rc;
385 391
386
387 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); 392 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
388 while (work_done < budget && mix_ircnt.s.ircnt) { 393 while (work_done < budget && mix_ircnt.s.ircnt) {
389 394
@@ -475,12 +480,11 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
475 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ 480 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
476 struct octeon_mgmt_cam_state cam_state; 481 struct octeon_mgmt_cam_state cam_state;
477 struct netdev_hw_addr *ha; 482 struct netdev_hw_addr *ha;
478 struct list_head *pos;
479 int available_cam_entries; 483 int available_cam_entries;
480 484
481 memset(&cam_state, 0, sizeof(cam_state)); 485 memset(&cam_state, 0, sizeof(cam_state));
482 486
483 if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) { 487 if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
484 cam_mode = 0; 488 cam_mode = 0;
485 available_cam_entries = 8; 489 available_cam_entries = 8;
486 } else { 490 } else {
@@ -488,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
488 * One CAM entry for the primary address, leaves seven 492 * One CAM entry for the primary address, leaves seven
489 * for the secondary addresses. 493 * for the secondary addresses.
490 */ 494 */
491 available_cam_entries = 7 - netdev->dev_addrs.count; 495 available_cam_entries = 7 - netdev->uc.count;
492 } 496 }
493 497
494 if (netdev->flags & IFF_MULTICAST) { 498 if (netdev->flags & IFF_MULTICAST) {
495 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || 499 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
496 netdev_mc_count(netdev) > available_cam_entries) 500 netdev_mc_count(netdev) > available_cam_entries)
497 multicast_mode = 2; /* 1 - Accept all multicast. */ 501 multicast_mode = 2; /* 2 - Accept all multicast. */
498 else 502 else
499 multicast_mode = 0; /* 0 - Use CAM. */ 503 multicast_mode = 0; /* 0 - Use CAM. */
500 } 504 }
@@ -502,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
502 if (cam_mode == 1) { 506 if (cam_mode == 1) {
503 /* Add primary address. */ 507 /* Add primary address. */
504 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); 508 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
505 list_for_each(pos, &netdev->dev_addrs.list) { 509 netdev_for_each_uc_addr(ha, netdev)
506 struct netdev_hw_addr *hw_addr; 510 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
507 hw_addr = list_entry(pos, struct netdev_hw_addr, list);
508 octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
509 list = list->next;
510 }
511 } 511 }
512 if (multicast_mode == 0) { 512 if (multicast_mode == 0) {
513 netdev_for_each_mc_addr(ha, netdev) 513 netdev_for_each_mc_addr(ha, netdev)
514 octeon_mgmt_cam_state_add(&cam_state, ha->addr); 514 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
515 } 515 }
516 516
517
518 spin_lock_irqsave(&p->lock, flags); 517 spin_lock_irqsave(&p->lock, flags);
519 518
520 /* Disable packet I/O. */ 519 /* Disable packet I/O. */
@@ -523,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
523 agl_gmx_prtx.s.en = 0; 522 agl_gmx_prtx.s.en = 0;
524 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); 523 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
525 524
526
527 adr_ctl.u64 = 0; 525 adr_ctl.u64 = 0;
528 adr_ctl.s.cam_mode = cam_mode; 526 adr_ctl.s.cam_mode = cam_mode;
529 adr_ctl.s.mcst = multicast_mode; 527 adr_ctl.s.mcst = multicast_mode;
@@ -596,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
596 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); 594 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
597 595
598 /* Clear any pending interrupts */ 596 /* Clear any pending interrupts */
599 cvmx_write_csr(CVMX_MIXX_ISR(port), 597 cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
600 cvmx_read_csr(CVMX_MIXX_ISR(port)));
601 cvmx_read_csr(CVMX_MIXX_ISR(port)); 598 cvmx_read_csr(CVMX_MIXX_ISR(port));
602 599
603 if (mixx_isr.s.irthresh) { 600 if (mixx_isr.s.irthresh) {
@@ -831,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
831 mix_irhwm.s.irhwm = 0; 828 mix_irhwm.s.irhwm = 0;
832 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); 829 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
833 830
834 /* Interrupt when we have 5 or more packets to clean. */ 831 /* Interrupt when we have 1 or more packets to clean. */
835 mix_orhwm.u64 = 0; 832 mix_orhwm.u64 = 0;
836 mix_orhwm.s.orhwm = 5; 833 mix_orhwm.s.orhwm = 1;
837 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); 834 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
838 835
839 /* Enable receive and transmit interrupts */ 836 /* Enable receive and transmit interrupts */
@@ -927,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
927 924
928 octeon_mgmt_reset_hw(p); 925 octeon_mgmt_reset_hw(p);
929 926
930
931 free_irq(p->irq, netdev); 927 free_irq(p->irq, netdev);
932 928
933 /* dma_unmap is a nop on Octeon, so just free everything. */ 929 /* dma_unmap is a nop on Octeon, so just free everything. */
@@ -944,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
944 DMA_BIDIRECTIONAL); 940 DMA_BIDIRECTIONAL);
945 kfree(p->tx_ring); 941 kfree(p->tx_ring);
946 942
947
948 return 0; 943 return 0;
949} 944}
950 945
@@ -954,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
954 int port = p->port; 949 int port = p->port;
955 union mgmt_port_ring_entry re; 950 union mgmt_port_ring_entry re;
956 unsigned long flags; 951 unsigned long flags;
952 int rv = NETDEV_TX_BUSY;
957 953
958 re.d64 = 0; 954 re.d64 = 0;
959 re.s.len = skb->len; 955 re.s.len = skb->len;
@@ -963,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
963 959
964 spin_lock_irqsave(&p->tx_list.lock, flags); 960 spin_lock_irqsave(&p->tx_list.lock, flags);
965 961
962 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
963 spin_unlock_irqrestore(&p->tx_list.lock, flags);
964 netif_stop_queue(netdev);
965 spin_lock_irqsave(&p->tx_list.lock, flags);
966 }
967
966 if (unlikely(p->tx_current_fill >= 968 if (unlikely(p->tx_current_fill >=
967 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { 969 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
968 spin_unlock_irqrestore(&p->tx_list.lock, flags); 970 spin_unlock_irqrestore(&p->tx_list.lock, flags);
969
970 dma_unmap_single(p->dev, re.s.addr, re.s.len, 971 dma_unmap_single(p->dev, re.s.addr, re.s.len,
971 DMA_TO_DEVICE); 972 DMA_TO_DEVICE);
972 973 goto out;
973 netif_stop_queue(netdev);
974 return NETDEV_TX_BUSY;
975 } 974 }
976 975
977 __skb_queue_tail(&p->tx_list, skb); 976 __skb_queue_tail(&p->tx_list, skb);
@@ -994,9 +993,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
994 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); 993 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
995 994
996 netdev->trans_start = jiffies; 995 netdev->trans_start = jiffies;
997 octeon_mgmt_clean_tx_buffers(p); 996 rv = NETDEV_TX_OK;
997out:
998 octeon_mgmt_update_tx_stats(netdev); 998 octeon_mgmt_update_tx_stats(netdev);
999 return NETDEV_TX_OK; 999 return rv;
1000} 1000}
1001 1001
1002#ifdef CONFIG_NET_POLL_CONTROLLER 1002#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1106,7 +1106,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
1106 netdev->netdev_ops = &octeon_mgmt_ops; 1106 netdev->netdev_ops = &octeon_mgmt_ops;
1107 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; 1107 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1108 1108
1109
1110 /* The mgmt ports get the first N MACs. */ 1109 /* The mgmt ports get the first N MACs. */
1111 for (i = 0; i < 6; i++) 1110 for (i = 0; i < 6; i++)
1112 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; 1111 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 3d1d3a7b7ed3..757f87bb1db3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
781 inw(ioaddr + EL3_STATUS)); 781 inw(ioaddr + EL3_STATUS));
782 782
783 spin_lock_irqsave(&lp->window_lock, flags); 783 spin_lock_irqsave(&lp->window_lock, flags);
784
785 dev->stats.tx_bytes += skb->len;
786
787 /* Put out the doubleword header... */
784 outw(skb->len, ioaddr + TX_FIFO); 788 outw(skb->len, ioaddr + TX_FIFO);
785 outw(0, ioaddr + TX_FIFO); 789 outw(0, ioaddr + TX_FIFO);
790 /* ... and the packet rounded to a doubleword. */
786 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); 791 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
787 792
788 dev->trans_start = jiffies; 793 dev->trans_start = jiffies;
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev)
1021 /* BadSSD */ inb(ioaddr + 12); 1026 /* BadSSD */ inb(ioaddr + 12);
1022 up = inb(ioaddr + 13); 1027 up = inb(ioaddr + 13);
1023 1028
1024 dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
1025
1026 EL3WINDOW(1); 1029 EL3WINDOW(1);
1027} 1030}
1028 1031
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 408f3d7b1545..949ac1a12537 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1804,23 +1804,30 @@ static void media_check(u_long arg)
1804 SMC_SELECT_BANK(1); 1804 SMC_SELECT_BANK(1);
1805 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; 1805 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
1806 1806
1807 SMC_SELECT_BANK(saved_bank);
1808 spin_unlock_irqrestore(&smc->lock, flags);
1809
1807 /* Check for pending interrupt with watchdog flag set: with 1810 /* Check for pending interrupt with watchdog flag set: with
1808 this, we can limp along even if the interrupt is blocked */ 1811 this, we can limp along even if the interrupt is blocked */
1809 if (smc->watchdog++ && ((i>>8) & i)) { 1812 if (smc->watchdog++ && ((i>>8) & i)) {
1810 if (!smc->fast_poll) 1813 if (!smc->fast_poll)
1811 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1814 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
1815 local_irq_save(flags);
1812 smc_interrupt(dev->irq, dev); 1816 smc_interrupt(dev->irq, dev);
1817 local_irq_restore(flags);
1813 smc->fast_poll = HZ; 1818 smc->fast_poll = HZ;
1814 } 1819 }
1815 if (smc->fast_poll) { 1820 if (smc->fast_poll) {
1816 smc->fast_poll--; 1821 smc->fast_poll--;
1817 smc->media.expires = jiffies + HZ/100; 1822 smc->media.expires = jiffies + HZ/100;
1818 add_timer(&smc->media); 1823 add_timer(&smc->media);
1819 SMC_SELECT_BANK(saved_bank);
1820 spin_unlock_irqrestore(&smc->lock, flags);
1821 return; 1824 return;
1822 } 1825 }
1823 1826
1827 spin_lock_irqsave(&smc->lock, flags);
1828
1829 saved_bank = inw(ioaddr + BANK_SELECT);
1830
1824 if (smc->cfg & CFG_MII_SELECT) { 1831 if (smc->cfg & CFG_MII_SELECT) {
1825 if (smc->mii_if.phy_id < 0) 1832 if (smc->mii_if.phy_id < 0)
1826 goto reschedule; 1833 goto reschedule;
@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1978 unsigned int ioaddr = dev->base_addr; 1985 unsigned int ioaddr = dev->base_addr;
1979 u16 saved_bank = inw(ioaddr + BANK_SELECT); 1986 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1980 int ret; 1987 int ret;
1988 unsigned long flags;
1981 1989
1982 spin_lock_irq(&smc->lock); 1990 spin_lock_irqsave(&smc->lock, flags);
1983 SMC_SELECT_BANK(3); 1991 SMC_SELECT_BANK(3);
1984 if (smc->cfg & CFG_MII_SELECT) 1992 if (smc->cfg & CFG_MII_SELECT)
1985 ret = mii_ethtool_gset(&smc->mii_if, ecmd); 1993 ret = mii_ethtool_gset(&smc->mii_if, ecmd);
1986 else 1994 else
1987 ret = smc_netdev_get_ecmd(dev, ecmd); 1995 ret = smc_netdev_get_ecmd(dev, ecmd);
1988 SMC_SELECT_BANK(saved_bank); 1996 SMC_SELECT_BANK(saved_bank);
1989 spin_unlock_irq(&smc->lock); 1997 spin_unlock_irqrestore(&smc->lock, flags);
1990 return ret; 1998 return ret;
1991} 1999}
1992 2000
@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1996 unsigned int ioaddr = dev->base_addr; 2004 unsigned int ioaddr = dev->base_addr;
1997 u16 saved_bank = inw(ioaddr + BANK_SELECT); 2005 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1998 int ret; 2006 int ret;
2007 unsigned long flags;
1999 2008
2000 spin_lock_irq(&smc->lock); 2009 spin_lock_irqsave(&smc->lock, flags);
2001 SMC_SELECT_BANK(3); 2010 SMC_SELECT_BANK(3);
2002 if (smc->cfg & CFG_MII_SELECT) 2011 if (smc->cfg & CFG_MII_SELECT)
2003 ret = mii_ethtool_sset(&smc->mii_if, ecmd); 2012 ret = mii_ethtool_sset(&smc->mii_if, ecmd);
2004 else 2013 else
2005 ret = smc_netdev_set_ecmd(dev, ecmd); 2014 ret = smc_netdev_set_ecmd(dev, ecmd);
2006 SMC_SELECT_BANK(saved_bank); 2015 SMC_SELECT_BANK(saved_bank);
2007 spin_unlock_irq(&smc->lock); 2016 spin_unlock_irqrestore(&smc->lock, flags);
2008 return ret; 2017 return ret;
2009} 2018}
2010 2019
@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
2014 unsigned int ioaddr = dev->base_addr; 2023 unsigned int ioaddr = dev->base_addr;
2015 u16 saved_bank = inw(ioaddr + BANK_SELECT); 2024 u16 saved_bank = inw(ioaddr + BANK_SELECT);
2016 u32 ret; 2025 u32 ret;
2026 unsigned long flags;
2017 2027
2018 spin_lock_irq(&smc->lock); 2028 spin_lock_irqsave(&smc->lock, flags);
2019 SMC_SELECT_BANK(3); 2029 SMC_SELECT_BANK(3);
2020 ret = smc_link_ok(dev); 2030 ret = smc_link_ok(dev);
2021 SMC_SELECT_BANK(saved_bank); 2031 SMC_SELECT_BANK(saved_bank);
2022 spin_unlock_irq(&smc->lock); 2032 spin_unlock_irqrestore(&smc->lock, flags);
2023 return ret; 2033 return ret;
2024} 2034}
2025 2035
@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
2056 int rc = 0; 2066 int rc = 0;
2057 u16 saved_bank; 2067 u16 saved_bank;
2058 unsigned int ioaddr = dev->base_addr; 2068 unsigned int ioaddr = dev->base_addr;
2069 unsigned long flags;
2059 2070
2060 if (!netif_running(dev)) 2071 if (!netif_running(dev))
2061 return -EINVAL; 2072 return -EINVAL;
2062 2073
2063 spin_lock_irq(&smc->lock); 2074 spin_lock_irqsave(&smc->lock, flags);
2064 saved_bank = inw(ioaddr + BANK_SELECT); 2075 saved_bank = inw(ioaddr + BANK_SELECT);
2065 SMC_SELECT_BANK(3); 2076 SMC_SELECT_BANK(3);
2066 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); 2077 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
2067 SMC_SELECT_BANK(saved_bank); 2078 SMC_SELECT_BANK(saved_bank);
2068 spin_unlock_irq(&smc->lock); 2079 spin_unlock_irqrestore(&smc->lock, flags);
2069 return rc; 2080 return rc;
2070} 2081}
2071 2082
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fc5938ba3d78..a527e37728cd 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -88,6 +88,11 @@ config LSI_ET1011C_PHY
88 ---help--- 88 ---help---
89 Supports the LSI ET1011C PHY. 89 Supports the LSI ET1011C PHY.
90 90
91config MICREL_PHY
92 tristate "Driver for Micrel PHYs"
93 ---help---
94 Supports the KSZ9021, VSC8201, KS8001 PHYs.
95
91config FIXED_PHY 96config FIXED_PHY
92 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 97 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
93 depends on PHYLIB=y 98 depends on PHYLIB=y
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 1342585af381..13bebab65d02 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 22obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MICREL_PHY) += micrel.o
23obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 24obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
new file mode 100644
index 000000000000..68dd107bad24
--- /dev/null
+++ b/drivers/net/phy/micrel.c
@@ -0,0 +1,113 @@
1/*
2 * drivers/net/phy/micrel.c
3 *
4 * Driver for Micrel PHYs
5 *
6 * Author: David J. Choi
7 *
8 * Copyright (c) 2010 Micrel, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * Support : ksz9021 , vsc8201, ks8001
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/phy.h>
21
22#define PHY_ID_KSZ9021 0x00221611
23#define PHY_ID_VSC8201 0x000FC413
24#define PHY_ID_KS8001 0x0022161A
25
26
27static int kszphy_config_init(struct phy_device *phydev)
28{
29 return 0;
30}
31
32
33static struct phy_driver ks8001_driver = {
34 .phy_id = PHY_ID_KS8001,
35 .phy_id_mask = 0x00fffff0,
36 .features = PHY_BASIC_FEATURES,
37 .flags = PHY_POLL,
38 .config_init = kszphy_config_init,
39 .config_aneg = genphy_config_aneg,
40 .read_status = genphy_read_status,
41 .driver = { .owner = THIS_MODULE,},
42};
43
44static struct phy_driver vsc8201_driver = {
45 .phy_id = PHY_ID_VSC8201,
46 .name = "Micrel VSC8201",
47 .phy_id_mask = 0x00fffff0,
48 .features = PHY_BASIC_FEATURES,
49 .flags = PHY_POLL,
50 .config_init = kszphy_config_init,
51 .config_aneg = genphy_config_aneg,
52 .read_status = genphy_read_status,
53 .driver = { .owner = THIS_MODULE,},
54};
55
56static struct phy_driver ksz9021_driver = {
57 .phy_id = PHY_ID_KSZ9021,
58 .phy_id_mask = 0x000fff10,
59 .name = "Micrel KSZ9021 Gigabit PHY",
60 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause,
61 .flags = PHY_POLL,
62 .config_init = kszphy_config_init,
63 .config_aneg = genphy_config_aneg,
64 .read_status = genphy_read_status,
65 .driver = { .owner = THIS_MODULE, },
66};
67
68static int __init ksphy_init(void)
69{
70 int ret;
71
72 ret = phy_driver_register(&ks8001_driver);
73 if (ret)
74 goto err1;
75 ret = phy_driver_register(&vsc8201_driver);
76 if (ret)
77 goto err2;
78
79 ret = phy_driver_register(&ksz9021_driver);
80 if (ret)
81 goto err3;
82 return 0;
83
84err3:
85 phy_driver_unregister(&vsc8201_driver);
86err2:
87 phy_driver_unregister(&ks8001_driver);
88err1:
89 return ret;
90}
91
92static void __exit ksphy_exit(void)
93{
94 phy_driver_unregister(&ks8001_driver);
95 phy_driver_unregister(&vsc8201_driver);
96 phy_driver_unregister(&ksz9021_driver);
97}
98
99module_init(ksphy_init);
100module_exit(ksphy_exit);
101
102MODULE_DESCRIPTION("Micrel PHY driver");
103MODULE_AUTHOR("David J. Choi");
104MODULE_LICENSE("GPL");
105
106static struct mdio_device_id micrel_tbl[] = {
107 { PHY_ID_KSZ9021, 0x000fff10 },
108 { PHY_ID_VSC8201, 0x00fffff0 },
109 { PHY_ID_KS8001, 0x00fffff0 },
110 { }
111};
112
113MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 35f195329fdd..5441688daba7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -405,6 +405,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
405 DECLARE_WAITQUEUE(wait, current); 405 DECLARE_WAITQUEUE(wait, current);
406 ssize_t ret; 406 ssize_t ret;
407 struct sk_buff *skb = NULL; 407 struct sk_buff *skb = NULL;
408 struct iovec iov;
408 409
409 ret = count; 410 ret = count;
410 411
@@ -448,7 +449,9 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
448 if (skb->len > count) 449 if (skb->len > count)
449 goto outf; 450 goto outf;
450 ret = -EFAULT; 451 ret = -EFAULT;
451 if (copy_to_user(buf, skb->data, skb->len)) 452 iov.iov_base = buf;
453 iov.iov_len = count;
454 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
452 goto outf; 455 goto outf;
453 ret = skb->len; 456 ret = skb->len;
454 457
@@ -1567,13 +1570,22 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1567 struct channel *pch = chan->ppp; 1570 struct channel *pch = chan->ppp;
1568 int proto; 1571 int proto;
1569 1572
1570 if (!pch || skb->len == 0) { 1573 if (!pch) {
1571 kfree_skb(skb); 1574 kfree_skb(skb);
1572 return; 1575 return;
1573 } 1576 }
1574 1577
1575 proto = PPP_PROTO(skb);
1576 read_lock_bh(&pch->upl); 1578 read_lock_bh(&pch->upl);
1579 if (!pskb_may_pull(skb, 2)) {
1580 kfree_skb(skb);
1581 if (pch->ppp) {
1582 ++pch->ppp->dev->stats.rx_length_errors;
1583 ppp_receive_error(pch->ppp);
1584 }
1585 goto done;
1586 }
1587
1588 proto = PPP_PROTO(skb);
1577 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1589 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1578 /* put it on the channel queue */ 1590 /* put it on the channel queue */
1579 skb_queue_tail(&pch->file.rq, skb); 1591 skb_queue_tail(&pch->file.rq, skb);
@@ -1585,6 +1597,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1585 } else { 1597 } else {
1586 ppp_do_recv(pch->ppp, skb, pch); 1598 ppp_do_recv(pch->ppp, skb, pch);
1587 } 1599 }
1600
1601done:
1588 read_unlock_bh(&pch->upl); 1602 read_unlock_bh(&pch->upl);
1589} 1603}
1590 1604
@@ -1617,7 +1631,8 @@ ppp_input_error(struct ppp_channel *chan, int code)
1617static void 1631static void
1618ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1632ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1619{ 1633{
1620 if (pskb_may_pull(skb, 2)) { 1634 /* note: a 0-length skb is used as an error indication */
1635 if (skb->len > 0) {
1621#ifdef CONFIG_PPP_MULTILINK 1636#ifdef CONFIG_PPP_MULTILINK
1622 /* XXX do channel-level decompression here */ 1637 /* XXX do channel-level decompression here */
1623 if (PPP_PROTO(skb) == PPP_MP) 1638 if (PPP_PROTO(skb) == PPP_MP)
@@ -1625,15 +1640,10 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1625 else 1640 else
1626#endif /* CONFIG_PPP_MULTILINK */ 1641#endif /* CONFIG_PPP_MULTILINK */
1627 ppp_receive_nonmp_frame(ppp, skb); 1642 ppp_receive_nonmp_frame(ppp, skb);
1628 return; 1643 } else {
1644 kfree_skb(skb);
1645 ppp_receive_error(ppp);
1629 } 1646 }
1630
1631 if (skb->len > 0)
1632 /* note: a 0-length skb is used as an error indication */
1633 ++ppp->dev->stats.rx_length_errors;
1634
1635 kfree_skb(skb);
1636 ppp_receive_error(ppp);
1637} 1647}
1638 1648
1639static void 1649static void
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba100ea..99f031a08a01 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -258,7 +258,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev); 258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
259 if (dev) { 259 if (dev) {
260 ifindex = dev->ifindex; 260 ifindex = dev->ifindex;
261 pn = net_generic(net, pppoe_net_id); 261 pn = pppoe_pernet(net);
262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, 262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
263 sp->sa_addr.pppoe.remote, ifindex); 263 sp->sa_addr.pppoe.remote, ifindex);
264 } 264 }
@@ -290,12 +290,6 @@ static void pppoe_flush_dev(struct net_device *dev)
290 struct pppoe_net *pn; 290 struct pppoe_net *pn;
291 int i; 291 int i;
292 292
293 BUG_ON(dev == NULL);
294
295 pn = pppoe_pernet(dev_net(dev));
296 if (!pn) /* already freed */
297 return;
298
299 write_lock_bh(&pn->hash_lock); 293 write_lock_bh(&pn->hash_lock);
300 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 294 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
301 struct pppox_sock *po = pn->hash_table[i]; 295 struct pppox_sock *po = pn->hash_table[i];
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 28c148cbe37b..2fba9cd5946f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 1 54#define _QLCNIC_LINUX_SUBVERSION 2
55#define QLCNIC_LINUX_VERSIONID "5.0.1" 55#define QLCNIC_LINUX_VERSIONID "5.0.2"
56 56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff) 58#define _major(v) (((v) >> 24) & 0xff)
@@ -428,6 +428,10 @@ struct qlcnic_adapter_stats {
428 u64 xmit_on; 428 u64 xmit_on;
429 u64 xmit_off; 429 u64 xmit_off;
430 u64 skb_alloc_failure; 430 u64 skb_alloc_failure;
431 u64 null_skb;
432 u64 null_rxbuf;
433 u64 rx_dma_map_error;
434 u64 tx_dma_map_error;
431}; 435};
432 436
433/* 437/*
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 08d6f105371f..6cdc5ebb7411 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, 69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), 70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
71 QLC_OFF(stats.skb_alloc_failure)}, 71 QLC_OFF(stats.skb_alloc_failure)},
72 {"null skb",
73 QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
74 {"null rxbuf",
75 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
76 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
77 QLC_OFF(stats.rx_dma_map_error)},
78 {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
79 QLC_OFF(stats.tx_dma_map_error)},
72 80
73}; 81};
74 82
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 51fa3fbcf58a..a984cd227582 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -694,17 +694,24 @@ enum {
694#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 694#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
695#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) 695#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
696#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) 696#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
697#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c)) 697#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
698#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) 698#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
699#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) 699#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
700 700
701 /* Device State */ 701/* Device State */
702#define QLCNIC_DEV_COLD 1 702#define QLCNIC_DEV_COLD 0x1
703#define QLCNIC_DEV_INITALIZING 2 703#define QLCNIC_DEV_INITIALIZING 0x2
704#define QLCNIC_DEV_READY 3 704#define QLCNIC_DEV_READY 0x3
705#define QLCNIC_DEV_NEED_RESET 4 705#define QLCNIC_DEV_NEED_RESET 0x4
706#define QLCNIC_DEV_NEED_QUISCENT 5 706#define QLCNIC_DEV_NEED_QUISCENT 0x5
707#define QLCNIC_DEV_FAILED 6 707#define QLCNIC_DEV_FAILED 0x6
708#define QLCNIC_DEV_QUISCENT 0x7
709
710#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
711#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
712#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
713#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
714#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
708 715
709#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 716#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
710#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 717#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -712,9 +719,8 @@ enum {
712#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) 719#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
713#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) 720#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
714 721
715#define FW_POLL_DELAY (2 * HZ) 722#define FW_POLL_DELAY (1 * HZ)
716#define FW_FAIL_THRESH 3 723#define FW_FAIL_THRESH 2
717#define FW_POLL_THRESH 10
718 724
719#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 725#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
720#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 726#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 01ce74ee99f9..1b621ca13e25 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1287,6 +1287,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1287 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1287 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1288 1288
1289 if (pci_dma_mapping_error(pdev, dma)) { 1289 if (pci_dma_mapping_error(pdev, dma)) {
1290 adapter->stats.rx_dma_map_error++;
1290 dev_kfree_skb_any(skb); 1291 dev_kfree_skb_any(skb);
1291 buffer->skb = NULL; 1292 buffer->skb = NULL;
1292 return -ENOMEM; 1293 return -ENOMEM;
@@ -1311,8 +1312,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1311 PCI_DMA_FROMDEVICE); 1312 PCI_DMA_FROMDEVICE);
1312 1313
1313 skb = buffer->skb; 1314 skb = buffer->skb;
1314 if (!skb) 1315 if (!skb) {
1316 adapter->stats.null_skb++;
1315 goto no_skb; 1317 goto no_skb;
1318 }
1316 1319
1317 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1320 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1318 adapter->stats.csummed++; 1321 adapter->stats.csummed++;
@@ -1502,6 +1505,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1502 1505
1503 if (rxbuf) 1506 if (rxbuf)
1504 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); 1507 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1508 else
1509 adapter->stats.null_rxbuf++;
1505 1510
1506skip: 1511skip:
1507 for (; desc_cnt > 0; desc_cnt--) { 1512 for (; desc_cnt > 0; desc_cnt--) {
@@ -1549,9 +1554,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1549 int producer, count = 0; 1554 int producer, count = 0;
1550 struct list_head *head; 1555 struct list_head *head;
1551 1556
1557 spin_lock(&rds_ring->lock);
1558
1552 producer = rds_ring->producer; 1559 producer = rds_ring->producer;
1553 1560
1554 spin_lock(&rds_ring->lock);
1555 head = &rds_ring->free_list; 1561 head = &rds_ring->free_list;
1556 while (!list_empty(head)) { 1562 while (!list_empty(head)) {
1557 1563
@@ -1573,13 +1579,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1573 1579
1574 producer = get_next_index(producer, rds_ring->num_desc); 1580 producer = get_next_index(producer, rds_ring->num_desc);
1575 } 1581 }
1576 spin_unlock(&rds_ring->lock);
1577 1582
1578 if (count) { 1583 if (count) {
1579 rds_ring->producer = producer; 1584 rds_ring->producer = producer;
1580 writel((producer-1) & (rds_ring->num_desc-1), 1585 writel((producer-1) & (rds_ring->num_desc-1),
1581 rds_ring->crb_rcv_producer); 1586 rds_ring->crb_rcv_producer);
1582 } 1587 }
1588 spin_unlock(&rds_ring->lock);
1583} 1589}
1584 1590
1585static void 1591static void
@@ -1591,10 +1597,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1591 int producer, count = 0; 1597 int producer, count = 0;
1592 struct list_head *head; 1598 struct list_head *head;
1593 1599
1594 producer = rds_ring->producer;
1595 if (!spin_trylock(&rds_ring->lock)) 1600 if (!spin_trylock(&rds_ring->lock))
1596 return; 1601 return;
1597 1602
1603 producer = rds_ring->producer;
1604
1598 head = &rds_ring->free_list; 1605 head = &rds_ring->free_list;
1599 while (!list_empty(head)) { 1606 while (!list_empty(head)) {
1600 1607
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index e4fd5dcdfb4c..ee573fe52a8e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -208,6 +208,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
208 struct qlcnic_host_sds_ring *sds_ring; 208 struct qlcnic_host_sds_ring *sds_ring;
209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210 210
211 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
212 return;
213
211 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 214 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
212 sds_ring = &recv_ctx->sds_rings[ring]; 215 sds_ring = &recv_ctx->sds_rings[ring];
213 napi_enable(&sds_ring->napi); 216 napi_enable(&sds_ring->napi);
@@ -222,6 +225,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
222 struct qlcnic_host_sds_ring *sds_ring; 225 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 226 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
224 227
228 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
229 return;
230
225 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 231 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
226 sds_ring = &recv_ctx->sds_rings[ring]; 232 sds_ring = &recv_ctx->sds_rings[ring];
227 qlcnic_disable_int(sds_ring); 233 qlcnic_disable_int(sds_ring);
@@ -1573,6 +1579,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1573 int frag_count, no_of_desc; 1579 int frag_count, no_of_desc;
1574 u32 num_txd = tx_ring->num_desc; 1580 u32 num_txd = tx_ring->num_desc;
1575 1581
1582 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1583 netif_stop_queue(netdev);
1584 return NETDEV_TX_BUSY;
1585 }
1586
1576 frag_count = skb_shinfo(skb)->nr_frags + 1; 1587 frag_count = skb_shinfo(skb)->nr_frags + 1;
1577 1588
1578 /* 4 fragments per cmd des */ 1589 /* 4 fragments per cmd des */
@@ -1589,8 +1600,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1589 1600
1590 pdev = adapter->pdev; 1601 pdev = adapter->pdev;
1591 1602
1592 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) 1603 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1604 adapter->stats.tx_dma_map_error++;
1593 goto drop_packet; 1605 goto drop_packet;
1606 }
1594 1607
1595 pbuf->skb = skb; 1608 pbuf->skb = skb;
1596 pbuf->frag_count = frag_count; 1609 pbuf->frag_count = frag_count;
@@ -1947,8 +1960,8 @@ static void qlcnic_poll_controller(struct net_device *netdev)
1947} 1960}
1948#endif 1961#endif
1949 1962
1950static void 1963static int
1951qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state) 1964qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
1952{ 1965{
1953 u32 val; 1966 u32 val;
1954 1967
@@ -1956,18 +1969,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1956 state != QLCNIC_DEV_NEED_QUISCENT); 1969 state != QLCNIC_DEV_NEED_QUISCENT);
1957 1970
1958 if (qlcnic_api_lock(adapter)) 1971 if (qlcnic_api_lock(adapter))
1959 return ; 1972 return -EIO;
1960 1973
1961 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 1974 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1962 1975
1963 if (state == QLCNIC_DEV_NEED_RESET) 1976 if (state == QLCNIC_DEV_NEED_RESET)
1964 val |= ((u32)0x1 << (adapter->portnum * 4)); 1977 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
1965 else if (state == QLCNIC_DEV_NEED_QUISCENT) 1978 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1966 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1)); 1979 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
1967 1980
1968 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 1981 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1969 1982
1970 qlcnic_api_unlock(adapter); 1983 qlcnic_api_unlock(adapter);
1984
1985 return 0;
1971} 1986}
1972 1987
1973static int 1988static int
@@ -1979,7 +1994,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1979 return -EBUSY; 1994 return -EBUSY;
1980 1995
1981 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 1996 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1982 val &= ~((u32)0x3 << (adapter->portnum * 4)); 1997 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1983 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 1998 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1984 1999
1985 qlcnic_api_unlock(adapter); 2000 qlcnic_api_unlock(adapter);
@@ -1996,14 +2011,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1996 goto err; 2011 goto err;
1997 2012
1998 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2013 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1999 val &= ~((u32)0x1 << (adapter->portnum * 4)); 2014 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2000 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2015 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2001 2016
2002 if (!(val & 0x11111111)) 2017 if (!(val & 0x11111111))
2003 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); 2018 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2004 2019
2005 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2020 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2006 val &= ~((u32)0x3 << (adapter->portnum * 4)); 2021 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2007 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2022 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2008 2023
2009 qlcnic_api_unlock(adapter); 2024 qlcnic_api_unlock(adapter);
@@ -2013,6 +2028,7 @@ err:
2013 clear_bit(__QLCNIC_RESETTING, &adapter->state); 2028 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2014} 2029}
2015 2030
2031/* Grab api lock, before checking state */
2016static int 2032static int
2017qlcnic_check_drv_state(struct qlcnic_adapter *adapter) 2033qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2018{ 2034{
@@ -2033,17 +2049,18 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2033{ 2049{
2034 u32 val, prev_state; 2050 u32 val, prev_state;
2035 u8 dev_init_timeo = adapter->dev_init_timeo; 2051 u8 dev_init_timeo = adapter->dev_init_timeo;
2036 int portnum = adapter->portnum; 2052 u8 portnum = adapter->portnum;
2053
2054 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2055 return 1;
2037 2056
2038 if (qlcnic_api_lock(adapter)) 2057 if (qlcnic_api_lock(adapter))
2039 return -1; 2058 return -1;
2040 2059
2041 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2060 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2042 if (!(val & ((int)0x1 << (portnum * 4)))) { 2061 if (!(val & (1 << (portnum * 4)))) {
2043 val |= ((u32)0x1 << (portnum * 4)); 2062 QLC_DEV_SET_REF_CNT(val, portnum);
2044 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2063 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2045 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
2046 goto start_fw;
2047 } 2064 }
2048 2065
2049 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2066 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
@@ -2051,8 +2068,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2051 2068
2052 switch (prev_state) { 2069 switch (prev_state) {
2053 case QLCNIC_DEV_COLD: 2070 case QLCNIC_DEV_COLD:
2054start_fw: 2071 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2055 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
2056 qlcnic_api_unlock(adapter); 2072 qlcnic_api_unlock(adapter);
2057 return 1; 2073 return 1;
2058 2074
@@ -2062,19 +2078,23 @@ start_fw:
2062 2078
2063 case QLCNIC_DEV_NEED_RESET: 2079 case QLCNIC_DEV_NEED_RESET:
2064 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2080 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2065 val |= ((u32)0x1 << (portnum * 4)); 2081 QLC_DEV_SET_RST_RDY(val, portnum);
2066 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2082 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2067 break; 2083 break;
2068 2084
2069 case QLCNIC_DEV_NEED_QUISCENT: 2085 case QLCNIC_DEV_NEED_QUISCENT:
2070 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2086 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2071 val |= ((u32)0x1 << ((portnum * 4) + 1)); 2087 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2072 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2088 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2073 break; 2089 break;
2074 2090
2075 case QLCNIC_DEV_FAILED: 2091 case QLCNIC_DEV_FAILED:
2076 qlcnic_api_unlock(adapter); 2092 qlcnic_api_unlock(adapter);
2077 return -1; 2093 return -1;
2094
2095 case QLCNIC_DEV_INITIALIZING:
2096 case QLCNIC_DEV_QUISCENT:
2097 break;
2078 } 2098 }
2079 2099
2080 qlcnic_api_unlock(adapter); 2100 qlcnic_api_unlock(adapter);
@@ -2094,7 +2114,7 @@ start_fw:
2094 return -1; 2114 return -1;
2095 2115
2096 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2116 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2097 val &= ~((u32)0x3 << (portnum * 4)); 2117 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2098 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2118 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2099 2119
2100 qlcnic_api_unlock(adapter); 2120 qlcnic_api_unlock(adapter);
@@ -2107,51 +2127,59 @@ qlcnic_fwinit_work(struct work_struct *work)
2107{ 2127{
2108 struct qlcnic_adapter *adapter = container_of(work, 2128 struct qlcnic_adapter *adapter = container_of(work,
2109 struct qlcnic_adapter, fw_work.work); 2129 struct qlcnic_adapter, fw_work.work);
2110 int dev_state; 2130 u32 dev_state = 0xf;
2111 2131
2112 if (test_bit(__QLCNIC_START_FW, &adapter->state)) { 2132 if (qlcnic_api_lock(adapter))
2133 goto err_ret;
2113 2134
2114 if (qlcnic_check_drv_state(adapter) && 2135 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2115 (adapter->fw_wait_cnt++ < adapter->reset_ack_timeo)) { 2136 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2116 qlcnic_schedule_work(adapter, 2137 adapter->reset_ack_timeo);
2117 qlcnic_fwinit_work, FW_POLL_DELAY); 2138 goto skip_ack_check;
2118 return; 2139 }
2140
2141 if (!qlcnic_check_drv_state(adapter)) {
2142skip_ack_check:
2143 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2144 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2145 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2146 QLCNIC_DEV_INITIALIZING);
2147 set_bit(__QLCNIC_START_FW, &adapter->state);
2148 QLCDB(adapter, DRV, "Restarting fw\n");
2119 } 2149 }
2120 2150
2121 QLCDB(adapter, DRV, "Resetting FW\n"); 2151 qlcnic_api_unlock(adapter);
2152
2122 if (!qlcnic_start_firmware(adapter)) { 2153 if (!qlcnic_start_firmware(adapter)) {
2123 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2154 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2124 return; 2155 return;
2125 } 2156 }
2126
2127 goto err_ret; 2157 goto err_ret;
2128 } 2158 }
2129 2159
2130 if (adapter->fw_wait_cnt++ > (adapter->dev_init_timeo / 2)) { 2160 qlcnic_api_unlock(adapter);
2131 dev_err(&adapter->pdev->dev,
2132 "Waiting for device to reset timeout\n");
2133 goto err_ret;
2134 }
2135 2161
2136 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2162 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2137 QLCDB(adapter, HW, "Func waiting: Device state=%d\n", dev_state); 2163 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2138 2164
2139 switch (dev_state) { 2165 switch (dev_state) {
2140 case QLCNIC_DEV_READY: 2166 case QLCNIC_DEV_NEED_RESET:
2141 if (!qlcnic_start_firmware(adapter)) { 2167 qlcnic_schedule_work(adapter,
2142 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2168 qlcnic_fwinit_work, FW_POLL_DELAY);
2143 return; 2169 return;
2144 }
2145 case QLCNIC_DEV_FAILED: 2170 case QLCNIC_DEV_FAILED:
2146 break; 2171 break;
2147 2172
2148 default: 2173 default:
2149 qlcnic_schedule_work(adapter, 2174 if (!qlcnic_start_firmware(adapter)) {
2150 qlcnic_fwinit_work, 2 * FW_POLL_DELAY); 2175 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2151 return; 2176 return;
2177 }
2152 } 2178 }
2153 2179
2154err_ret: 2180err_ret:
2181 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2182 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2155 netif_device_attach(adapter->netdev); 2183 netif_device_attach(adapter->netdev);
2156 qlcnic_clr_all_drv_state(adapter); 2184 qlcnic_clr_all_drv_state(adapter);
2157} 2185}
@@ -2180,7 +2208,8 @@ qlcnic_detach_work(struct work_struct *work)
2180 if (adapter->temp == QLCNIC_TEMP_PANIC) 2208 if (adapter->temp == QLCNIC_TEMP_PANIC)
2181 goto err_ret; 2209 goto err_ret;
2182 2210
2183 qlcnic_set_drv_state(adapter, adapter->dev_state); 2211 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2212 goto err_ret;
2184 2213
2185 adapter->fw_wait_cnt = 0; 2214 adapter->fw_wait_cnt = 0;
2186 2215
@@ -2196,6 +2225,7 @@ err_ret:
2196 2225
2197} 2226}
2198 2227
2228/*Transit to RESET state from READY state only */
2199static void 2229static void
2200qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2230qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2201{ 2231{
@@ -2206,9 +2236,8 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2206 2236
2207 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2237 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2208 2238
2209 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) { 2239 if (state == QLCNIC_DEV_READY) {
2210 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); 2240 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2211 set_bit(__QLCNIC_START_FW, &adapter->state);
2212 QLCDB(adapter, DRV, "NEED_RESET state set\n"); 2241 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2213 } 2242 }
2214 2243
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 340da3915b96..217e709bda3e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2775,6 +2775,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
2775{ 2775{
2776 iounmap(ioaddr); 2776 iounmap(ioaddr);
2777 pci_release_regions(pdev); 2777 pci_release_regions(pdev);
2778 pci_clear_mwi(pdev);
2778 pci_disable_device(pdev); 2779 pci_disable_device(pdev);
2779 free_netdev(dev); 2780 free_netdev(dev);
2780} 2781}
@@ -2841,8 +2842,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2841 spin_lock_irq(&tp->lock); 2842 spin_lock_irq(&tp->lock);
2842 2843
2843 RTL_W8(Cfg9346, Cfg9346_Unlock); 2844 RTL_W8(Cfg9346, Cfg9346_Unlock);
2845
2844 RTL_W32(MAC4, high); 2846 RTL_W32(MAC4, high);
2847 RTL_R32(MAC4);
2848
2845 RTL_W32(MAC0, low); 2849 RTL_W32(MAC0, low);
2850 RTL_R32(MAC0);
2851
2846 RTL_W8(Cfg9346, Cfg9346_Lock); 2852 RTL_W8(Cfg9346, Cfg9346_Lock);
2847 2853
2848 spin_unlock_irq(&tp->lock); 2854 spin_unlock_irq(&tp->lock);
@@ -3030,9 +3036,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3030 goto err_out_free_dev_1; 3036 goto err_out_free_dev_1;
3031 } 3037 }
3032 3038
3033 rc = pci_set_mwi(pdev); 3039 if (pci_set_mwi(pdev) < 0)
3034 if (rc < 0) 3040 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
3035 goto err_out_disable_2;
3036 3041
3037 /* make sure PCI base addr 1 is MMIO */ 3042 /* make sure PCI base addr 1 is MMIO */
3038 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { 3043 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3040,7 +3045,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3040 "region #%d not an MMIO resource, aborting\n", 3045 "region #%d not an MMIO resource, aborting\n",
3041 region); 3046 region);
3042 rc = -ENODEV; 3047 rc = -ENODEV;
3043 goto err_out_mwi_3; 3048 goto err_out_mwi_2;
3044 } 3049 }
3045 3050
3046 /* check for weird/broken PCI region reporting */ 3051 /* check for weird/broken PCI region reporting */
@@ -3048,13 +3053,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3048 netif_err(tp, probe, dev, 3053 netif_err(tp, probe, dev,
3049 "Invalid PCI region size(s), aborting\n"); 3054 "Invalid PCI region size(s), aborting\n");
3050 rc = -ENODEV; 3055 rc = -ENODEV;
3051 goto err_out_mwi_3; 3056 goto err_out_mwi_2;
3052 } 3057 }
3053 3058
3054 rc = pci_request_regions(pdev, MODULENAME); 3059 rc = pci_request_regions(pdev, MODULENAME);
3055 if (rc < 0) { 3060 if (rc < 0) {
3056 netif_err(tp, probe, dev, "could not request regions\n"); 3061 netif_err(tp, probe, dev, "could not request regions\n");
3057 goto err_out_mwi_3; 3062 goto err_out_mwi_2;
3058 } 3063 }
3059 3064
3060 tp->cp_cmd = PCIMulRW | RxChkSum; 3065 tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3067,7 +3072,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3067 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3072 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3068 if (rc < 0) { 3073 if (rc < 0) {
3069 netif_err(tp, probe, dev, "DMA configuration failed\n"); 3074 netif_err(tp, probe, dev, "DMA configuration failed\n");
3070 goto err_out_free_res_4; 3075 goto err_out_free_res_3;
3071 } 3076 }
3072 } 3077 }
3073 3078
@@ -3076,7 +3081,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3076 if (!ioaddr) { 3081 if (!ioaddr) {
3077 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); 3082 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3078 rc = -EIO; 3083 rc = -EIO;
3079 goto err_out_free_res_4; 3084 goto err_out_free_res_3;
3080 } 3085 }
3081 3086
3082 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3087 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3118,7 +3123,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3118 if (i == ARRAY_SIZE(rtl_chip_info)) { 3123 if (i == ARRAY_SIZE(rtl_chip_info)) {
3119 dev_err(&pdev->dev, 3124 dev_err(&pdev->dev,
3120 "driver bug, MAC version not found in rtl_chip_info\n"); 3125 "driver bug, MAC version not found in rtl_chip_info\n");
3121 goto err_out_msi_5; 3126 goto err_out_msi_4;
3122 } 3127 }
3123 tp->chipset = i; 3128 tp->chipset = i;
3124 3129
@@ -3183,7 +3188,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3183 3188
3184 rc = register_netdev(dev); 3189 rc = register_netdev(dev);
3185 if (rc < 0) 3190 if (rc < 0)
3186 goto err_out_msi_5; 3191 goto err_out_msi_4;
3187 3192
3188 pci_set_drvdata(pdev, dev); 3193 pci_set_drvdata(pdev, dev);
3189 3194
@@ -3212,14 +3217,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3212out: 3217out:
3213 return rc; 3218 return rc;
3214 3219
3215err_out_msi_5: 3220err_out_msi_4:
3216 rtl_disable_msi(pdev, tp); 3221 rtl_disable_msi(pdev, tp);
3217 iounmap(ioaddr); 3222 iounmap(ioaddr);
3218err_out_free_res_4: 3223err_out_free_res_3:
3219 pci_release_regions(pdev); 3224 pci_release_regions(pdev);
3220err_out_mwi_3: 3225err_out_mwi_2:
3221 pci_clear_mwi(pdev); 3226 pci_clear_mwi(pdev);
3222err_out_disable_2:
3223 pci_disable_device(pdev); 3227 pci_disable_device(pdev);
3224err_out_free_dev_1: 3228err_out_free_dev_1:
3225 free_netdev(dev); 3229 free_netdev(dev);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 332031747a23..fec3c29b2ea8 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
48#include <asm/io.h> 48#include <asm/io.h>
49#include <asm/processor.h> /* Processor type for cache alignment. */ 49#include <asm/processor.h> /* Processor type for cache alignment. */
50 50
51/* This is only here until the firmware is ready. In that case,
52 the firmware leaves the ethernet address in the register for us. */
53#ifdef CONFIG_SIBYTE_STANDALONE
54#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
55#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
56#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
57#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
58#endif
59
60
61/* These identify the driver base version and may not be removed. */
62#if 0
63static char version1[] __initdata =
64"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
65#endif
66
67
68/* Operational parameters that usually are not changed. */ 51/* Operational parameters that usually are not changed. */
69 52
70#define CONFIG_SBMAC_COALESCE 53#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
349 ********************************************************************* */ 332 ********************************************************************* */
350 333
351static char sbmac_string[] = "sb1250-mac"; 334static char sbmac_string[] = "sb1250-mac";
352static char sbmac_pretty[] = "SB1250 MAC";
353 335
354static char sbmac_mdio_string[] = "sb1250-mac-mdio"; 336static char sbmac_mdio_string[] = "sb1250-mac-mdio";
355 337
@@ -2182,85 +2164,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2182 } 2164 }
2183} 2165}
2184 2166
2185#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2186/**********************************************************************
2187 * SBMAC_PARSE_XDIGIT(str)
2188 *
2189 * Parse a hex digit, returning its value
2190 *
2191 * Input parameters:
2192 * str - character
2193 *
2194 * Return value:
2195 * hex value, or -1 if invalid
2196 ********************************************************************* */
2197
2198static int sbmac_parse_xdigit(char str)
2199{
2200 int digit;
2201
2202 if ((str >= '0') && (str <= '9'))
2203 digit = str - '0';
2204 else if ((str >= 'a') && (str <= 'f'))
2205 digit = str - 'a' + 10;
2206 else if ((str >= 'A') && (str <= 'F'))
2207 digit = str - 'A' + 10;
2208 else
2209 return -1;
2210
2211 return digit;
2212}
2213
2214/**********************************************************************
2215 * SBMAC_PARSE_HWADDR(str,hwaddr)
2216 *
2217 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2218 * Ethernet address.
2219 *
2220 * Input parameters:
2221 * str - string
2222 * hwaddr - pointer to hardware address
2223 *
2224 * Return value:
2225 * 0 if ok, else -1
2226 ********************************************************************* */
2227
2228static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2229{
2230 int digit1,digit2;
2231 int idx = 6;
2232
2233 while (*str && (idx > 0)) {
2234 digit1 = sbmac_parse_xdigit(*str);
2235 if (digit1 < 0)
2236 return -1;
2237 str++;
2238 if (!*str)
2239 return -1;
2240
2241 if ((*str == ':') || (*str == '-')) {
2242 digit2 = digit1;
2243 digit1 = 0;
2244 }
2245 else {
2246 digit2 = sbmac_parse_xdigit(*str);
2247 if (digit2 < 0)
2248 return -1;
2249 str++;
2250 }
2251
2252 *hwaddr++ = (digit1 << 4) | digit2;
2253 idx--;
2254
2255 if (*str == '-')
2256 str++;
2257 if (*str == ':')
2258 str++;
2259 }
2260 return 0;
2261}
2262#endif
2263
2264static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) 2167static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2265{ 2168{
2266 if (new_mtu > ENET_PACKET_SIZE) 2169 if (new_mtu > ENET_PACKET_SIZE)
@@ -2353,17 +2256,36 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2353 2256
2354 sc->mii_bus = mdiobus_alloc(); 2257 sc->mii_bus = mdiobus_alloc();
2355 if (sc->mii_bus == NULL) { 2258 if (sc->mii_bus == NULL) {
2356 sbmac_uninitctx(sc); 2259 err = -ENOMEM;
2357 return -ENOMEM; 2260 goto uninit_ctx;
2261 }
2262
2263 sc->mii_bus->name = sbmac_mdio_string;
2264 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2265 sc->mii_bus->priv = sc;
2266 sc->mii_bus->read = sbmac_mii_read;
2267 sc->mii_bus->write = sbmac_mii_write;
2268 sc->mii_bus->irq = sc->phy_irq;
2269 for (i = 0; i < PHY_MAX_ADDR; ++i)
2270 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2271
2272 sc->mii_bus->parent = &pldev->dev;
2273 /*
2274 * Probe PHY address
2275 */
2276 err = mdiobus_register(sc->mii_bus);
2277 if (err) {
2278 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2279 dev->name);
2280 goto free_mdio;
2358 } 2281 }
2282 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2359 2283
2360 err = register_netdev(dev); 2284 err = register_netdev(dev);
2361 if (err) { 2285 if (err) {
2362 printk(KERN_ERR "%s.%d: unable to register netdev\n", 2286 printk(KERN_ERR "%s.%d: unable to register netdev\n",
2363 sbmac_string, idx); 2287 sbmac_string, idx);
2364 mdiobus_free(sc->mii_bus); 2288 goto unreg_mdio;
2365 sbmac_uninitctx(sc);
2366 return err;
2367 } 2289 }
2368 2290
2369 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); 2291 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
@@ -2379,19 +2301,15 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2379 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", 2301 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
2380 dev->name, base, eaddr); 2302 dev->name, base, eaddr);
2381 2303
2382 sc->mii_bus->name = sbmac_mdio_string;
2383 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2384 sc->mii_bus->priv = sc;
2385 sc->mii_bus->read = sbmac_mii_read;
2386 sc->mii_bus->write = sbmac_mii_write;
2387 sc->mii_bus->irq = sc->phy_irq;
2388 for (i = 0; i < PHY_MAX_ADDR; ++i)
2389 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2390
2391 sc->mii_bus->parent = &pldev->dev;
2392 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2393
2394 return 0; 2304 return 0;
2305unreg_mdio:
2306 mdiobus_unregister(sc->mii_bus);
2307 dev_set_drvdata(&pldev->dev, NULL);
2308free_mdio:
2309 mdiobus_free(sc->mii_bus);
2310uninit_ctx:
2311 sbmac_uninitctx(sc);
2312 return err;
2395} 2313}
2396 2314
2397 2315
@@ -2417,16 +2335,6 @@ static int sbmac_open(struct net_device *dev)
2417 goto out_err; 2335 goto out_err;
2418 } 2336 }
2419 2337
2420 /*
2421 * Probe PHY address
2422 */
2423 err = mdiobus_register(sc->mii_bus);
2424 if (err) {
2425 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2426 dev->name);
2427 goto out_unirq;
2428 }
2429
2430 sc->sbm_speed = sbmac_speed_none; 2338 sc->sbm_speed = sbmac_speed_none;
2431 sc->sbm_duplex = sbmac_duplex_none; 2339 sc->sbm_duplex = sbmac_duplex_none;
2432 sc->sbm_fc = sbmac_fc_none; 2340 sc->sbm_fc = sbmac_fc_none;
@@ -2457,11 +2365,7 @@ static int sbmac_open(struct net_device *dev)
2457 return 0; 2365 return 0;
2458 2366
2459out_unregister: 2367out_unregister:
2460 mdiobus_unregister(sc->mii_bus);
2461
2462out_unirq:
2463 free_irq(dev->irq, dev); 2368 free_irq(dev->irq, dev);
2464
2465out_err: 2369out_err:
2466 return err; 2370 return err;
2467} 2371}
@@ -2650,9 +2554,6 @@ static int sbmac_close(struct net_device *dev)
2650 2554
2651 phy_disconnect(sc->phy_dev); 2555 phy_disconnect(sc->phy_dev);
2652 sc->phy_dev = NULL; 2556 sc->phy_dev = NULL;
2653
2654 mdiobus_unregister(sc->mii_bus);
2655
2656 free_irq(dev->irq, dev); 2557 free_irq(dev->irq, dev);
2657 2558
2658 sbdma_emptyring(&(sc->sbm_txdma)); 2559 sbdma_emptyring(&(sc->sbm_txdma));
@@ -2759,6 +2660,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2759 2660
2760 unregister_netdev(dev); 2661 unregister_netdev(dev);
2761 sbmac_uninitctx(sc); 2662 sbmac_uninitctx(sc);
2663 mdiobus_unregister(sc->mii_bus);
2762 mdiobus_free(sc->mii_bus); 2664 mdiobus_free(sc->mii_bus);
2763 iounmap(sc->sbm_base); 2665 iounmap(sc->sbm_base);
2764 free_netdev(dev); 2666 free_netdev(dev);
@@ -2766,162 +2668,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2766 return 0; 2668 return 0;
2767} 2669}
2768 2670
2769
2770static struct platform_device **sbmac_pldev;
2771static int sbmac_max_units;
2772
2773#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2774static void __init sbmac_setup_hwaddr(int idx, char *addr)
2775{
2776 void __iomem *sbm_base;
2777 unsigned long start, end;
2778 uint8_t eaddr[6];
2779 uint64_t val;
2780
2781 if (idx >= sbmac_max_units)
2782 return;
2783
2784 start = A_MAC_CHANNEL_BASE(idx);
2785 end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
2786
2787 sbm_base = ioremap_nocache(start, end - start + 1);
2788 if (!sbm_base) {
2789 printk(KERN_ERR "%s: unable to map device registers\n",
2790 sbmac_string);
2791 return;
2792 }
2793
2794 sbmac_parse_hwaddr(addr, eaddr);
2795 val = sbmac_addr2reg(eaddr);
2796 __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
2797 val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
2798
2799 iounmap(sbm_base);
2800}
2801#endif
2802
2803static int __init sbmac_platform_probe_one(int idx)
2804{
2805 struct platform_device *pldev;
2806 struct {
2807 struct resource r;
2808 char name[strlen(sbmac_pretty) + 4];
2809 } *res;
2810 int err;
2811
2812 res = kzalloc(sizeof(*res), GFP_KERNEL);
2813 if (!res) {
2814 printk(KERN_ERR "%s.%d: unable to allocate memory\n",
2815 sbmac_string, idx);
2816 err = -ENOMEM;
2817 goto out_err;
2818 }
2819
2820 /*
2821 * This is the base address of the MAC.
2822 */
2823 snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
2824 res->r.name = res->name;
2825 res->r.flags = IORESOURCE_MEM;
2826 res->r.start = A_MAC_CHANNEL_BASE(idx);
2827 res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
2828
2829 pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
2830 if (IS_ERR(pldev)) {
2831 printk(KERN_ERR "%s.%d: unable to register platform device\n",
2832 sbmac_string, idx);
2833 err = PTR_ERR(pldev);
2834 goto out_kfree;
2835 }
2836
2837 if (!pldev->dev.driver) {
2838 err = 0; /* No hardware at this address. */
2839 goto out_unregister;
2840 }
2841
2842 sbmac_pldev[idx] = pldev;
2843 return 0;
2844
2845out_unregister:
2846 platform_device_unregister(pldev);
2847
2848out_kfree:
2849 kfree(res);
2850
2851out_err:
2852 return err;
2853}
2854
2855static void __init sbmac_platform_probe(void)
2856{
2857 int i;
2858
2859 /* Set the number of available units based on the SOC type. */
2860 switch (soc_type) {
2861 case K_SYS_SOC_TYPE_BCM1250:
2862 case K_SYS_SOC_TYPE_BCM1250_ALT:
2863 sbmac_max_units = 3;
2864 break;
2865 case K_SYS_SOC_TYPE_BCM1120:
2866 case K_SYS_SOC_TYPE_BCM1125:
2867 case K_SYS_SOC_TYPE_BCM1125H:
2868 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
2869 sbmac_max_units = 2;
2870 break;
2871 case K_SYS_SOC_TYPE_BCM1x55:
2872 case K_SYS_SOC_TYPE_BCM1x80:
2873 sbmac_max_units = 4;
2874 break;
2875 default:
2876 return; /* none */
2877 }
2878
2879 /*
2880 * For bringup when not using the firmware, we can pre-fill
2881 * the MAC addresses using the environment variables
2882 * specified in this file (or maybe from the config file?)
2883 */
2884#ifdef SBMAC_ETH0_HWADDR
2885 sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
2886#endif
2887#ifdef SBMAC_ETH1_HWADDR
2888 sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
2889#endif
2890#ifdef SBMAC_ETH2_HWADDR
2891 sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
2892#endif
2893#ifdef SBMAC_ETH3_HWADDR
2894 sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
2895#endif
2896
2897 sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
2898 GFP_KERNEL);
2899 if (!sbmac_pldev) {
2900 printk(KERN_ERR "%s: unable to allocate memory\n",
2901 sbmac_string);
2902 return;
2903 }
2904
2905 /*
2906 * Walk through the Ethernet controllers and find
2907 * those who have their MAC addresses set.
2908 */
2909 for (i = 0; i < sbmac_max_units; i++)
2910 if (sbmac_platform_probe_one(i))
2911 break;
2912}
2913
2914
2915static void __exit sbmac_platform_cleanup(void)
2916{
2917 int i;
2918
2919 for (i = 0; i < sbmac_max_units; i++)
2920 platform_device_unregister(sbmac_pldev[i]);
2921 kfree(sbmac_pldev);
2922}
2923
2924
2925static struct platform_driver sbmac_driver = { 2671static struct platform_driver sbmac_driver = {
2926 .probe = sbmac_probe, 2672 .probe = sbmac_probe,
2927 .remove = __exit_p(sbmac_remove), 2673 .remove = __exit_p(sbmac_remove),
@@ -2932,20 +2678,11 @@ static struct platform_driver sbmac_driver = {
2932 2678
2933static int __init sbmac_init_module(void) 2679static int __init sbmac_init_module(void)
2934{ 2680{
2935 int err; 2681 return platform_driver_register(&sbmac_driver);
2936
2937 err = platform_driver_register(&sbmac_driver);
2938 if (err)
2939 return err;
2940
2941 sbmac_platform_probe();
2942
2943 return err;
2944} 2682}
2945 2683
2946static void __exit sbmac_cleanup_module(void) 2684static void __exit sbmac_cleanup_module(void)
2947{ 2685{
2948 sbmac_platform_cleanup();
2949 platform_driver_unregister(&sbmac_driver); 2686 platform_driver_unregister(&sbmac_driver);
2950} 2687}
2951 2688
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 1ad61b7bba40..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
225 * never be concurrently called more than once on the same channel, 225 * never be concurrently called more than once on the same channel,
226 * though different channels may be being processed concurrently. 226 * though different channels may be being processed concurrently.
227 */ 227 */
228static int efx_process_channel(struct efx_channel *channel, int rx_quota) 228static int efx_process_channel(struct efx_channel *channel, int budget)
229{ 229{
230 struct efx_nic *efx = channel->efx; 230 struct efx_nic *efx = channel->efx;
231 int rx_packets; 231 int spent;
232 232
233 if (unlikely(efx->reset_pending != RESET_TYPE_NONE || 233 if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
234 !channel->enabled)) 234 !channel->enabled))
235 return 0; 235 return 0;
236 236
237 rx_packets = efx_nic_process_eventq(channel, rx_quota); 237 spent = efx_nic_process_eventq(channel, budget);
238 if (rx_packets == 0) 238 if (spent == 0)
239 return 0; 239 return 0;
240 240
241 /* Deliver last RX packet. */ 241 /* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
249 249
250 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 250 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
251 251
252 return rx_packets; 252 return spent;
253} 253}
254 254
255/* Mark channel as finished processing 255/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
278{ 278{
279 struct efx_channel *channel = 279 struct efx_channel *channel =
280 container_of(napi, struct efx_channel, napi_str); 280 container_of(napi, struct efx_channel, napi_str);
281 int rx_packets; 281 int spent;
282 282
283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
284 channel->channel, raw_smp_processor_id()); 284 channel->channel, raw_smp_processor_id());
285 285
286 rx_packets = efx_process_channel(channel, budget); 286 spent = efx_process_channel(channel, budget);
287 287
288 if (rx_packets < budget) { 288 if (spent < budget) {
289 struct efx_nic *efx = channel->efx; 289 struct efx_nic *efx = channel->efx;
290 290
291 if (channel->used_flags & EFX_USED_BY_RX && 291 if (channel->channel < efx->n_rx_channels &&
292 efx->irq_rx_adaptive && 292 efx->irq_rx_adaptive &&
293 unlikely(++channel->irq_count == 1000)) { 293 unlikely(++channel->irq_count == 1000)) {
294 if (unlikely(channel->irq_mod_score < 294 if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
318 efx_channel_processed(channel); 318 efx_channel_processed(channel);
319 } 319 }
320 320
321 return rx_packets; 321 return spent;
322} 322}
323 323
324/* Process the eventq of the specified channel immediately on this CPU 324/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
333{ 333{
334 struct efx_nic *efx = channel->efx; 334 struct efx_nic *efx = channel->efx;
335 335
336 BUG_ON(!channel->used_flags);
337 BUG_ON(!channel->enabled); 336 BUG_ON(!channel->enabled);
338 337
339 /* Disable interrupts and wait for ISRs to complete */ 338 /* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
446 445
447 efx_for_each_channel(channel, efx) { 446 efx_for_each_channel(channel, efx) {
448 number = channel->channel; 447 number = channel->channel;
449 if (efx->n_channels > efx->n_rx_queues) { 448 if (efx->n_channels > efx->n_rx_channels) {
450 if (channel->channel < efx->n_rx_queues) { 449 if (channel->channel < efx->n_rx_channels) {
451 type = "-rx"; 450 type = "-rx";
452 } else { 451 } else {
453 type = "-tx"; 452 type = "-tx";
454 number -= efx->n_rx_queues; 453 number -= efx->n_rx_channels;
455 } 454 }
456 } 455 }
457 snprintf(channel->name, sizeof(channel->name), 456 snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
585 efx_for_each_channel_tx_queue(tx_queue, channel) 584 efx_for_each_channel_tx_queue(tx_queue, channel)
586 efx_remove_tx_queue(tx_queue); 585 efx_remove_tx_queue(tx_queue);
587 efx_remove_eventq(channel); 586 efx_remove_eventq(channel);
588
589 channel->used_flags = 0;
590} 587}
591 588
592void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) 589void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
956 pci_disable_device(efx->pci_dev); 953 pci_disable_device(efx->pci_dev);
957} 954}
958 955
959/* Get number of RX queues wanted. Return number of online CPU 956/* Get number of channels wanted. Each channel will have its own IRQ,
960 * packages in the expectation that an IRQ balancer will spread 957 * 1 RX queue and/or 2 TX queues. */
961 * interrupts across them. */ 958static int efx_wanted_channels(void)
962static int efx_wanted_rx_queues(void)
963{ 959{
964 cpumask_var_t core_mask; 960 cpumask_var_t core_mask;
965 int count; 961 int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
995 991
996 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 992 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
997 struct msix_entry xentries[EFX_MAX_CHANNELS]; 993 struct msix_entry xentries[EFX_MAX_CHANNELS];
998 int wanted_ints; 994 int n_channels;
999 int rx_queues;
1000 995
1001 /* We want one RX queue and interrupt per CPU package 996 n_channels = efx_wanted_channels();
1002 * (or as specified by the rss_cpus module parameter). 997 if (separate_tx_channels)
1003 * We will need one channel per interrupt. 998 n_channels *= 2;
1004 */ 999 n_channels = min(n_channels, max_channels);
1005 rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
1006 wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
1007 wanted_ints = min(wanted_ints, max_channels);
1008 1000
1009 for (i = 0; i < wanted_ints; i++) 1001 for (i = 0; i < n_channels; i++)
1010 xentries[i].entry = i; 1002 xentries[i].entry = i;
1011 rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints); 1003 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
1012 if (rc > 0) { 1004 if (rc > 0) {
1013 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" 1005 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
1014 " available (%d < %d).\n", rc, wanted_ints); 1006 " available (%d < %d).\n", rc, n_channels);
1015 EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); 1007 EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
1016 EFX_BUG_ON_PARANOID(rc >= wanted_ints); 1008 EFX_BUG_ON_PARANOID(rc >= n_channels);
1017 wanted_ints = rc; 1009 n_channels = rc;
1018 rc = pci_enable_msix(efx->pci_dev, xentries, 1010 rc = pci_enable_msix(efx->pci_dev, xentries,
1019 wanted_ints); 1011 n_channels);
1020 } 1012 }
1021 1013
1022 if (rc == 0) { 1014 if (rc == 0) {
1023 efx->n_rx_queues = min(rx_queues, wanted_ints); 1015 efx->n_channels = n_channels;
1024 efx->n_channels = wanted_ints; 1016 if (separate_tx_channels) {
1025 for (i = 0; i < wanted_ints; i++) 1017 efx->n_tx_channels =
1018 max(efx->n_channels / 2, 1U);
1019 efx->n_rx_channels =
1020 max(efx->n_channels -
1021 efx->n_tx_channels, 1U);
1022 } else {
1023 efx->n_tx_channels = efx->n_channels;
1024 efx->n_rx_channels = efx->n_channels;
1025 }
1026 for (i = 0; i < n_channels; i++)
1026 efx->channel[i].irq = xentries[i].vector; 1027 efx->channel[i].irq = xentries[i].vector;
1027 } else { 1028 } else {
1028 /* Fall back to single channel MSI */ 1029 /* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1033 1034
1034 /* Try single interrupt MSI */ 1035 /* Try single interrupt MSI */
1035 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 1036 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1036 efx->n_rx_queues = 1;
1037 efx->n_channels = 1; 1037 efx->n_channels = 1;
1038 efx->n_rx_channels = 1;
1039 efx->n_tx_channels = 1;
1038 rc = pci_enable_msi(efx->pci_dev); 1040 rc = pci_enable_msi(efx->pci_dev);
1039 if (rc == 0) { 1041 if (rc == 0) {
1040 efx->channel[0].irq = efx->pci_dev->irq; 1042 efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1046 1048
1047 /* Assume legacy interrupts */ 1049 /* Assume legacy interrupts */
1048 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 1050 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1049 efx->n_rx_queues = 1;
1050 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); 1051 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
1052 efx->n_rx_channels = 1;
1053 efx->n_tx_channels = 1;
1051 efx->legacy_irq = efx->pci_dev->irq; 1054 efx->legacy_irq = efx->pci_dev->irq;
1052 } 1055 }
1053} 1056}
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1068 1071
1069static void efx_set_channels(struct efx_nic *efx) 1072static void efx_set_channels(struct efx_nic *efx)
1070{ 1073{
1074 struct efx_channel *channel;
1071 struct efx_tx_queue *tx_queue; 1075 struct efx_tx_queue *tx_queue;
1072 struct efx_rx_queue *rx_queue; 1076 struct efx_rx_queue *rx_queue;
1077 unsigned tx_channel_offset =
1078 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1073 1079
1074 efx_for_each_tx_queue(tx_queue, efx) { 1080 efx_for_each_channel(channel, efx) {
1075 if (separate_tx_channels) 1081 if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
1076 tx_queue->channel = &efx->channel[efx->n_channels-1]; 1082 channel->tx_queue = &efx->tx_queue[
1077 else 1083 (channel->channel - tx_channel_offset) *
1078 tx_queue->channel = &efx->channel[0]; 1084 EFX_TXQ_TYPES];
1079 tx_queue->channel->used_flags |= EFX_USED_BY_TX; 1085 efx_for_each_channel_tx_queue(tx_queue, channel)
1086 tx_queue->channel = channel;
1087 }
1080 } 1088 }
1081 1089
1082 efx_for_each_rx_queue(rx_queue, efx) { 1090 efx_for_each_rx_queue(rx_queue, efx)
1083 rx_queue->channel = &efx->channel[rx_queue->queue]; 1091 rx_queue->channel = &efx->channel[rx_queue->queue];
1084 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
1085 }
1086} 1092}
1087 1093
1088static int efx_probe_nic(struct efx_nic *efx) 1094static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
1096 if (rc) 1102 if (rc)
1097 return rc; 1103 return rc;
1098 1104
1099 /* Determine the number of channels and RX queues by trying to hook 1105 /* Determine the number of channels and queues by trying to hook
1100 * in MSI-X interrupts. */ 1106 * in MSI-X interrupts. */
1101 efx_probe_interrupts(efx); 1107 efx_probe_interrupts(efx);
1102 1108
1103 efx_set_channels(efx); 1109 efx_set_channels(efx);
1110 efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
1104 1111
1105 /* Initialise the interrupt moderation settings */ 1112 /* Initialise the interrupt moderation settings */
1106 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1113 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
1187 /* Mark the port as enabled so port reconfigurations can start, then 1194 /* Mark the port as enabled so port reconfigurations can start, then
1188 * restart the transmit interface early so the watchdog timer stops */ 1195 * restart the transmit interface early so the watchdog timer stops */
1189 efx_start_port(efx); 1196 efx_start_port(efx);
1190 if (efx_dev_registered(efx))
1191 efx_wake_queue(efx);
1192 1197
1193 efx_for_each_channel(channel, efx) 1198 efx_for_each_channel(channel, efx) {
1199 if (efx_dev_registered(efx))
1200 efx_wake_queue(channel);
1194 efx_start_channel(channel); 1201 efx_start_channel(channel);
1202 }
1195 1203
1196 efx_nic_enable_interrupts(efx); 1204 efx_nic_enable_interrupts(efx);
1197 1205
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
1282 /* Stop the kernel transmit interface late, so the watchdog 1290 /* Stop the kernel transmit interface late, so the watchdog
1283 * timer isn't ticking over the flush */ 1291 * timer isn't ticking over the flush */
1284 if (efx_dev_registered(efx)) { 1292 if (efx_dev_registered(efx)) {
1285 efx_stop_queue(efx); 1293 struct efx_channel *channel;
1294 efx_for_each_channel(channel, efx)
1295 efx_stop_queue(channel);
1286 netif_tx_lock_bh(efx->net_dev); 1296 netif_tx_lock_bh(efx->net_dev);
1287 netif_tx_unlock_bh(efx->net_dev); 1297 netif_tx_unlock_bh(efx->net_dev);
1288 } 1298 }
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
1537{ 1547{
1538 struct efx_nic *efx = netdev_priv(net_dev); 1548 struct efx_nic *efx = netdev_priv(net_dev);
1539 1549
1540 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:" 1550 EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
1541 " resetting channels\n", 1551 efx->port_enabled);
1542 atomic_read(&efx->netif_stop_count), efx->port_enabled);
1543 1552
1544 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 1553 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1545} 1554}
@@ -1861,6 +1870,7 @@ out:
1861 } 1870 }
1862 1871
1863 if (disabled) { 1872 if (disabled) {
1873 dev_close(efx->net_dev);
1864 EFX_ERR(efx, "has been disabled\n"); 1874 EFX_ERR(efx, "has been disabled\n");
1865 efx->state = STATE_DISABLED; 1875 efx->state = STATE_DISABLED;
1866 } else { 1876 } else {
@@ -1884,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data)
1884 } 1894 }
1885 1895
1886 rtnl_lock(); 1896 rtnl_lock();
1887 if (efx_reset(efx, efx->reset_pending)) 1897 (void)efx_reset(efx, efx->reset_pending);
1888 dev_close(efx->net_dev);
1889 rtnl_unlock(); 1898 rtnl_unlock();
1890} 1899}
1891 1900
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2014 2023
2015 efx->net_dev = net_dev; 2024 efx->net_dev = net_dev;
2016 efx->rx_checksum_enabled = true; 2025 efx->rx_checksum_enabled = true;
2017 spin_lock_init(&efx->netif_stop_lock);
2018 spin_lock_init(&efx->stats_lock); 2026 spin_lock_init(&efx->stats_lock);
2019 mutex_init(&efx->mac_lock); 2027 mutex_init(&efx->mac_lock);
2020 efx->mac_op = type->default_mac_ops; 2028 efx->mac_op = type->default_mac_ops;
2021 efx->phy_op = &efx_dummy_phy_operations; 2029 efx->phy_op = &efx_dummy_phy_operations;
2022 efx->mdio.dev = net_dev; 2030 efx->mdio.dev = net_dev;
2023 INIT_WORK(&efx->mac_work, efx_mac_work); 2031 INIT_WORK(&efx->mac_work, efx_mac_work);
2024 atomic_set(&efx->netif_stop_count, 1);
2025 2032
2026 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2033 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2027 channel = &efx->channel[i]; 2034 channel = &efx->channel[i];
2028 channel->efx = efx; 2035 channel->efx = efx;
2029 channel->channel = i; 2036 channel->channel = i;
2030 channel->work_pending = false; 2037 channel->work_pending = false;
2038 spin_lock_init(&channel->tx_stop_lock);
2039 atomic_set(&channel->tx_stop_count, 1);
2031 } 2040 }
2032 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) { 2041 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
2033 tx_queue = &efx->tx_queue[i]; 2042 tx_queue = &efx->tx_queue[i];
2034 tx_queue->efx = efx; 2043 tx_queue->efx = efx;
2035 tx_queue->queue = i; 2044 tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2201 int i, rc; 2210 int i, rc;
2202 2211
2203 /* Allocate and initialise a struct net_device and struct efx_nic */ 2212 /* Allocate and initialise a struct net_device and struct efx_nic */
2204 net_dev = alloc_etherdev(sizeof(*efx)); 2213 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
2205 if (!net_dev) 2214 if (!net_dev)
2206 return -ENOMEM; 2215 return -ENOMEM;
2207 net_dev->features |= (type->offload_features | NETIF_F_SG | 2216 net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
35extern netdev_tx_t 35extern netdev_tx_t
36efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 36efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
38extern void efx_stop_queue(struct efx_nic *efx); 38extern void efx_stop_queue(struct efx_channel *channel);
39extern void efx_wake_queue(struct efx_nic *efx); 39extern void efx_wake_queue(struct efx_channel *channel);
40#define EFX_TXQ_SIZE 1024 40#define EFX_TXQ_SIZE 1024
41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1) 41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
42 42
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928e..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
304{ 304{
305 struct efx_tx_queue *tx_queue; 305 struct efx_tx_queue *tx_queue;
306 306
307 efx_for_each_tx_queue(tx_queue, efx) { 307 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
308 efx_fill_test(test_index++, strings, data, 308 efx_fill_test(test_index++, strings, data,
309 &lb_tests->tx_sent[tx_queue->queue], 309 &lb_tests->tx_sent[tx_queue->queue],
310 EFX_TX_QUEUE_NAME(tx_queue), 310 EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
647 efx_for_each_tx_queue(tx_queue, efx) { 647 efx_for_each_tx_queue(tx_queue, efx) {
648 channel = tx_queue->channel; 648 channel = tx_queue->channel;
649 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 649 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
650 if (channel->used_flags != EFX_USED_BY_RX_TX) 650 if (channel->channel < efx->n_rx_channels)
651 coalesce->tx_coalesce_usecs_irq = 651 coalesce->tx_coalesce_usecs_irq =
652 channel->irq_moderation; 652 channel->irq_moderation;
653 else 653 else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
690 690
691 /* If the channel is shared only allow RX parameters to be set */ 691 /* If the channel is shared only allow RX parameters to be set */
692 efx_for_each_tx_queue(tx_queue, efx) { 692 efx_for_each_tx_queue(tx_queue, efx) {
693 if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) && 693 if ((tx_queue->channel->channel < efx->n_rx_channels) &&
694 tx_usecs) { 694 tx_usecs) {
695 EFX_ERR(efx, "Channel is shared. " 695 EFX_ERR(efx, "Channel is shared. "
696 "Only RX coalescing may be set\n"); 696 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d294d66fd600..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
177 177
178 /* Check to see if we have a serious error condition */
179 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
180 if (unlikely(syserr))
181 return efx_nic_fatal_interrupt(efx);
182
183 /* Determine interrupting queues, clear interrupt status 178 /* Determine interrupting queues, clear interrupt status
184 * register and acknowledge the device interrupt. 179 * register and acknowledge the device interrupt.
185 */ 180 */
186 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); 181 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
187 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); 182 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
183
184 /* Check to see if we have a serious error condition */
185 if (queues & (1U << efx->fatal_irq_level)) {
186 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
187 if (unlikely(syserr))
188 return efx_nic_fatal_interrupt(efx);
189 }
190
188 EFX_ZERO_OWORD(*int_ker); 191 EFX_ZERO_OWORD(*int_ker);
189 wmb(); /* Ensure the vector is cleared before interrupt ack */ 192 wmb(); /* Ensure the vector is cleared before interrupt ack */
190 falcon_irq_ack_a1(efx); 193 falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
504 /* Ensure the correct MAC is selected before statistics 507 /* Ensure the correct MAC is selected before statistics
505 * are re-enabled by the caller */ 508 * are re-enabled by the caller */
506 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); 509 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
510
511 /* This can run even when the GMAC is selected */
512 falcon_setup_xaui(efx);
507} 513}
508 514
509void falcon_drain_tx_fifo(struct efx_nic *efx) 515void falcon_drain_tx_fifo(struct efx_nic *efx)
@@ -1320,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1320 1326
1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1327 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1322 1328
1323 falcon_probe_board(efx, board_rev); 1329 rc = falcon_probe_board(efx, board_rev);
1330 if (rc)
1331 goto fail2;
1324 1332
1325 kfree(nvconfig); 1333 kfree(nvconfig);
1326 return 0; 1334 return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
728 }, 728 },
729}; 729};
730 730
731static const struct falcon_board_type falcon_dummy_board = { 731int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
732 .init = efx_port_dummy_op_int,
733 .init_phy = efx_port_dummy_op_void,
734 .fini = efx_port_dummy_op_void,
735 .set_id_led = efx_port_dummy_op_set_id_led,
736 .monitor = efx_port_dummy_op_int,
737};
738
739void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
740{ 732{
741 struct falcon_board *board = falcon_board(efx); 733 struct falcon_board *board = falcon_board(efx);
742 u8 type_id = FALCON_BOARD_TYPE(revision_info); 734 u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
754 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 746 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
755 ? board->type->ref_model : board->type->gen_type, 747 ? board->type->ref_model : board->type->gen_type,
756 'A' + board->major, board->minor); 748 'A' + board->major, board->minor);
749 return 0;
757 } else { 750 } else {
758 EFX_ERR(efx, "unknown board type %d\n", type_id); 751 EFX_ERR(efx, "unknown board type %d\n", type_id);
759 board->type = &falcon_dummy_board; 752 return -ENODEV;
760 } 753 }
761} 754}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a20..c84a2ce2ccbb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
26 *************************************************************************/ 26 *************************************************************************/
27 27
28/* Configure the XAUI driver that is an output from Falcon */ 28/* Configure the XAUI driver that is an output from Falcon */
29static void falcon_setup_xaui(struct efx_nic *efx) 29void falcon_setup_xaui(struct efx_nic *efx)
30{ 30{
31 efx_oword_t sdctl, txdrv; 31 efx_oword_t sdctl, txdrv;
32 32
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
85 return -ETIMEDOUT; 85 return -ETIMEDOUT;
86} 86}
87 87
88static void falcon_mask_status_intr(struct efx_nic *efx, bool enable) 88static void falcon_ack_status_intr(struct efx_nic *efx)
89{ 89{
90 efx_oword_t reg; 90 efx_oword_t reg;
91 91
92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
93 return; 93 return;
94 94
95 /* We expect xgmii faults if the wireside link is up */ 95 /* We expect xgmii faults if the wireside link is down */
96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) 96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
97 return; 97 return;
98 98
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
101 if (efx->xmac_poll_required) 101 if (efx->xmac_poll_required)
102 return; 102 return;
103 103
104 /* Flush the ISR */ 104 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
105 if (enable)
106 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
107
108 EFX_POPULATE_OWORD_2(reg,
109 FRF_AB_XM_MSK_RMTFLT, !enable,
110 FRF_AB_XM_MSK_LCLFLT, !enable);
111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
112} 105}
113 106
114static bool falcon_xgxs_link_ok(struct efx_nic *efx) 107static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
283 276
284static int falcon_reconfigure_xmac(struct efx_nic *efx) 277static int falcon_reconfigure_xmac(struct efx_nic *efx)
285{ 278{
286 falcon_mask_status_intr(efx, false);
287
288 falcon_reconfigure_xgxs_core(efx); 279 falcon_reconfigure_xgxs_core(efx);
289 falcon_reconfigure_xmac_core(efx); 280 falcon_reconfigure_xmac_core(efx);
290 281
291 falcon_reconfigure_mac_wrapper(efx); 282 falcon_reconfigure_mac_wrapper(efx);
292 283
293 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 284 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
294 falcon_mask_status_intr(efx, true); 285 falcon_ack_status_intr(efx);
295 286
296 return 0; 287 return 0;
297} 288}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
362 !efx->xmac_poll_required) 353 !efx->xmac_poll_required)
363 return; 354 return;
364 355
365 falcon_mask_status_intr(efx, false);
366 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 356 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
367 falcon_mask_status_intr(efx, true); 357 falcon_ack_status_intr(efx);
368} 358}
369 359
370struct efx_mac_operations falcon_xmac_operations = { 360struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c77414..93cc3c1b9450 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
613 } 613 }
614 614
615 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 615 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
616 rc = -EMSGSIZE; 616 rc = -EIO;
617 goto fail; 617 goto fail;
618 } 618 }
619 619
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
647 outbuf, sizeof(outbuf), &outlen); 647 outbuf, sizeof(outbuf), &outlen);
648 if (rc) 648 if (rc)
649 goto fail; 649 goto fail;
650 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) 650 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
651 rc = -EIO;
651 goto fail; 652 goto fail;
653 }
652 654
653 if (was_attached != NULL) 655 if (was_attached != NULL)
654 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 656 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
676 goto fail; 678 goto fail;
677 679
678 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { 680 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
679 rc = -EMSGSIZE; 681 rc = -EIO;
680 goto fail; 682 goto fail;
681 } 683 }
682 684
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
738 outbuf, sizeof(outbuf), &outlen); 740 outbuf, sizeof(outbuf), &outlen);
739 if (rc) 741 if (rc)
740 goto fail; 742 goto fail;
741 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) 743 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
744 rc = -EIO;
742 goto fail; 745 goto fail;
746 }
743 747
744 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 748 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
745 return 0; 749 return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
765 outbuf, sizeof(outbuf), &outlen); 769 outbuf, sizeof(outbuf), &outlen);
766 if (rc) 770 if (rc)
767 goto fail; 771 goto fail;
768 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) 772 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
773 rc = -EIO;
769 goto fail; 774 goto fail;
775 }
770 776
771 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 777 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
772 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 778 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
926 932
927 rc = efx_mcdi_nvram_types(efx, &nvram_types); 933 rc = efx_mcdi_nvram_types(efx, &nvram_types);
928 if (rc) 934 if (rc)
929 return rc; 935 goto fail1;
930 936
931 type = 0; 937 type = 0;
932 while (nvram_types != 0) { 938 while (nvram_types != 0) {
933 if (nvram_types & 1) { 939 if (nvram_types & 1) {
934 rc = efx_mcdi_nvram_test(efx, type); 940 rc = efx_mcdi_nvram_test(efx, type);
935 if (rc) 941 if (rc)
936 return rc; 942 goto fail2;
937 } 943 }
938 type++; 944 type++;
939 nvram_types >>= 1; 945 nvram_types >>= 1;
940 } 946 }
941 947
942 return 0; 948 return 0;
949
950fail2:
951 EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
952fail1:
953 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
954 return rc;
943} 955}
944 956
945static int efx_mcdi_read_assertion(struct efx_nic *efx) 957static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
968 if (rc) 980 if (rc)
969 return rc; 981 return rc;
970 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 982 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
971 return -EINVAL; 983 return -EIO;
972 984
973 /* Print out any recorded assertion state */ 985 /* Print out any recorded assertion state */
974 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 986 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1086 goto fail; 1098 goto fail;
1087 1099
1088 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1100 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1089 rc = -EMSGSIZE; 1101 rc = -EIO;
1090 goto fail; 1102 goto fail;
1091 } 1103 }
1092 1104
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1121 goto fail; 1133 goto fail;
1122 1134
1123 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1135 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1124 rc = -EMSGSIZE; 1136 rc = -EIO;
1125 goto fail; 1137 goto fail;
1126 } 1138 }
1127 1139
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412a..39182631ac92 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
80 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN]; 80 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
81 int rc; 81 int rc;
82 efx_dword_t *cmd_ptr; 82 efx_dword_t *cmd_ptr;
83 int period = 1000; 83 int period = enable ? 1000 : 0;
84 u32 addr_hi; 84 u32 addr_hi;
85 u32 addr_lo; 85 u32 addr_lo;
86 86
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
92 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo); 92 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); 93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); 94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
95 if (enable) 95 EFX_POPULATE_DWORD_7(*cmd_ptr,
96 EFX_POPULATE_DWORD_6(*cmd_ptr, 96 MC_CMD_MAC_STATS_CMD_DMA, !!enable,
97 MC_CMD_MAC_STATS_CMD_DMA, 1, 97 MC_CMD_MAC_STATS_CMD_CLEAR, clear,
98 MC_CMD_MAC_STATS_CMD_CLEAR, clear, 98 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
99 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, 99 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
100 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1, 100 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
101 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0, 101 MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period); 102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
103 else
104 EFX_POPULATE_DWORD_5(*cmd_ptr,
105 MC_CMD_MAC_STATS_CMD_DMA, 0,
106 MC_CMD_MAC_STATS_CMD_CLEAR, clear,
107 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
108 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
109 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
110 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 103 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
111 104
112 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 105 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b3..90359e644006 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
863 * bist output. The driver should only consume the BIST output 863 * bist output. The driver should only consume the BIST output
864 * after validating OUTLEN and PHY_CFG.PHY_TYPE. 864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
865 * 865 *
866 * If a driver can't succesfully parse the BIST output, it should 866 * If a driver can't successfully parse the BIST output, it should
867 * still respect the pass/Fail in OUT.RESULT 867 * still respect the pass/Fail in OUT.RESULT
868 * 868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST 869 * Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
872#define MC_CMD_POLL_BIST 0x26 872#define MC_CMD_POLL_BIST 0x26
873#define MC_CMD_POLL_BIST_IN_LEN 0 873#define MC_CMD_POLL_BIST_IN_LEN 0
874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40 875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
878#define MC_CMD_POLL_BIST_RUNNING 1 878#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
882/* Generic: */ 882/* Generic: */
883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
884/* SFT9001-specific: */ 884/* SFT9001-specific: */
885/* (offset 4 unused?) */ 885#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8 886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12 887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16 888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20 889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24 890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28 891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32 892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
893#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 893#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
895#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
896#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 895#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
1054/* MC_CMD_PHY_STATS: 1053/* MC_CMD_PHY_STATS:
1055 * Get generic PHY statistics 1054 * Get generic PHY statistics
1056 * 1055 *
1057 * This call returns the statistics for a generic PHY, by direct DMA 1056 * This call returns the statistics for a generic PHY in a sparse
1058 * into host memory, in a sparse array (indexed by the enumerate). 1057 * array (indexed by the enumerate). Each value is represented by
1059 * Each value is represented by a 32bit number. 1058 * a 32bit number.
1059 *
1060 * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
1061 * may be read directly out of shared memory. If DMA_ADDR != 0, then
1062 * the statistics are dmad to that (page-aligned location)
1060 * 1063 *
1061 * Locks required: None 1064 * Locks required: None
1062 * Returns: 0, ETIME 1065 * Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
1066#define MC_CMD_PHY_STATS_IN_LEN 8 1069#define MC_CMD_PHY_STATS_IN_LEN 8
1067#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 1070#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1068#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 1071#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1069#define MC_CMD_PHY_STATS_OUT_LEN 0 1072#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1073#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
1070 1074
1071/* Unified MAC statistics enumeration */ 1075/* Unified MAC statistics enumeration */
1072#define MC_CMD_MAC_GENERATION_START 0 1076#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
1158#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1 1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1159#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2 1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1160#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1 1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1161/* Fields only relevent when PERIODIC_CHANGE is set */ 1165/* Remaining PERIOD* fields only relevent when PERIODIC_CHANGE is set */
1162#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3 1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1 1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4 1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
1165#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1 1169#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
1170#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
1171#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
1166#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16 1172#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
1167#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16 1173#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
1168#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 1174#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
1729#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ 1735#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
1730#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ 1736#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
1731 1737
1738/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
1739 * Change bits of network port state for test purposes in ways that would never be
1740 * useful in normal operation and so need a special command to change. */
1741#define MC_CMD_TEST_HACK 0x2f
1742#define MC_CMD_TEST_HACK_IN_LEN 8
1743#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
1744#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
1745#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
1746#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force on */
1747#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
1748#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
1749#define MC_CMD_TEST_HACK_OUT_LEN 0
1750
1751/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
1752 * is a warranty-voiding operation.
1753 *
1754 * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
1755 * followed by 4 32-bit values: min(warning) max(warning), min(fatal), max(fatal). Which
1756 * of these limits are meaningful and what their interpretation is is sensor-specific.
1757 *
1758 * OUT: nothing
1759 *
1760 * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
1761 * out of range.
1762 */
1763#define MC_CMD_SENSOR_SET_LIMS 0x4e
1764#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
1765#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
1766#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
1767#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
1768#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
1769#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
1770
1732/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be 1771/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
1733 * used for post-3.0 extensions. If you run out of space, look for gaps or 1772 * used for post-3.0 extensions. If you run out of space, look for gaps or
1734 * commands that are unused in the existing range. */ 1773 * commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f2354696663..6032c0e1f1f8 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h" 19#include "mdio_10g.h"
20#include "nic.h"
21#include "selftest.h"
20 22
21struct efx_mcdi_phy_cfg { 23struct efx_mcdi_phy_cfg {
22 u32 flags; 24 u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
48 goto fail; 50 goto fail;
49 51
50 if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { 52 if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
51 rc = -EMSGSIZE; 53 rc = -EIO;
52 goto fail; 54 goto fail;
53 } 55 }
54 56
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
111 goto fail; 113 goto fail;
112 114
113 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { 115 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
114 rc = -EMSGSIZE; 116 rc = -EIO;
115 goto fail; 117 goto fail;
116 } 118 }
117 119
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
587 return rc; 589 return rc;
588 590
589 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) 591 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
590 return -EMSGSIZE; 592 return -EIO;
591 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) 593 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
592 return -EINVAL; 594 return -EINVAL;
593 595
594 return 0; 596 return 0;
595} 597}
596 598
599static const char *const mcdi_sft9001_cable_diag_names[] = {
600 "cable.pairA.length",
601 "cable.pairB.length",
602 "cable.pairC.length",
603 "cable.pairD.length",
604 "cable.pairA.status",
605 "cable.pairB.status",
606 "cable.pairC.status",
607 "cable.pairD.status",
608};
609
610static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
611 int *results)
612{
613 unsigned int retry, i, count = 0;
614 size_t outlen;
615 u32 status;
616 u8 *buf, *ptr;
617 int rc;
618
619 buf = kzalloc(0x100, GFP_KERNEL);
620 if (buf == NULL)
621 return -ENOMEM;
622
623 BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
624 MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
625 rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
626 NULL, 0, NULL);
627 if (rc)
628 goto out;
629
630 /* Wait up to 10s for BIST to finish */
631 for (retry = 0; retry < 100; ++retry) {
632 BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
633 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
634 buf, 0x100, &outlen);
635 if (rc)
636 goto out;
637
638 status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
639 if (status != MC_CMD_POLL_BIST_RUNNING)
640 goto finished;
641
642 msleep(100);
643 }
644
645 rc = -ETIMEDOUT;
646 goto out;
647
648finished:
649 results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
650
651 /* SFT9001 specific cable diagnostics output */
652 if (efx->phy_type == PHY_TYPE_SFT9001B &&
653 (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
654 bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
655 ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
656 if (status == MC_CMD_POLL_BIST_PASSED &&
657 outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
658 for (i = 0; i < 8; i++) {
659 results[count + i] =
660 EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
661 EFX_DWORD_0);
662 }
663 }
664 count += 8;
665 }
666 rc = count;
667
668out:
669 kfree(buf);
670
671 return rc;
672}
673
674static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
675 unsigned flags)
676{
677 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
678 u32 mode;
679 int rc;
680
681 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
682 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
683 if (rc < 0)
684 return rc;
685
686 results += rc;
687 }
688
689 /* If we support both LONG and SHORT, then run each in response to
690 * break or not. Otherwise, run the one we support */
691 mode = 0;
692 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
693 if ((flags & ETH_TEST_FL_OFFLINE) &&
694 (phy_cfg->flags &
695 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
696 mode = MC_CMD_PHY_BIST_CABLE_LONG;
697 else
698 mode = MC_CMD_PHY_BIST_CABLE_SHORT;
699 } else if (phy_cfg->flags &
700 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
701 mode = MC_CMD_PHY_BIST_CABLE_LONG;
702
703 if (mode != 0) {
704 rc = efx_mcdi_bist(efx, mode, results);
705 if (rc < 0)
706 return rc;
707 results += rc;
708 }
709
710 return 0;
711}
712
713const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
714{
715 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
716
717 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
718 if (index == 0)
719 return "bist";
720 --index;
721 }
722
723 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
724 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
725 if (index == 0)
726 return "cable";
727 --index;
728
729 if (efx->phy_type == PHY_TYPE_SFT9001B) {
730 if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
731 return mcdi_sft9001_cable_diag_names[index];
732 index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
733 }
734 }
735
736 return NULL;
737}
738
597struct efx_phy_operations efx_mcdi_phy_ops = { 739struct efx_phy_operations efx_mcdi_phy_ops = {
598 .probe = efx_mcdi_phy_probe, 740 .probe = efx_mcdi_phy_probe,
599 .init = efx_port_dummy_op_int, 741 .init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
604 .get_settings = efx_mcdi_phy_get_settings, 746 .get_settings = efx_mcdi_phy_get_settings,
605 .set_settings = efx_mcdi_phy_set_settings, 747 .set_settings = efx_mcdi_phy_set_settings,
606 .test_alive = efx_mcdi_phy_test_alive, 748 .test_alive = efx_mcdi_phy_test_alive,
607 .run_tests = NULL, 749 .run_tests = efx_mcdi_phy_run_tests,
608 .test_name = NULL, 750 .test_name = efx_mcdi_phy_test_name,
609}; 751};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e272097..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
85#define EFX_MAX_CHANNELS 32 85#define EFX_MAX_CHANNELS 32
86#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 86#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
87 87
88#define EFX_TX_QUEUE_OFFLOAD_CSUM 0 88/* Checksum generation is a per-queue option in hardware, so each
89#define EFX_TX_QUEUE_NO_CSUM 1 89 * queue visible to the networking core is backed by two hardware TX
90#define EFX_TX_QUEUE_COUNT 2 90 * queues. */
91#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
92#define EFX_TXQ_TYPE_OFFLOAD 1
93#define EFX_TXQ_TYPES 2
94#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
91 95
92/** 96/**
93 * struct efx_special_buffer - An Efx special buffer 97 * struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
187struct efx_tx_queue { 191struct efx_tx_queue {
188 /* Members which don't change on the fast path */ 192 /* Members which don't change on the fast path */
189 struct efx_nic *efx ____cacheline_aligned_in_smp; 193 struct efx_nic *efx ____cacheline_aligned_in_smp;
190 int queue; 194 unsigned queue;
191 struct efx_channel *channel; 195 struct efx_channel *channel;
192 struct efx_nic *nic; 196 struct efx_nic *nic;
193 struct efx_tx_buffer *buffer; 197 struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
306}; 310};
307 311
308 312
309/* Flags for channel->used_flags */
310#define EFX_USED_BY_RX 1
311#define EFX_USED_BY_TX 2
312#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
313
314enum efx_rx_alloc_method { 313enum efx_rx_alloc_method {
315 RX_ALLOC_METHOD_AUTO = 0, 314 RX_ALLOC_METHOD_AUTO = 0,
316 RX_ALLOC_METHOD_SKB = 1, 315 RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
327 * @efx: Associated Efx NIC 326 * @efx: Associated Efx NIC
328 * @channel: Channel instance number 327 * @channel: Channel instance number
329 * @name: Name for channel and IRQ 328 * @name: Name for channel and IRQ
330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator 329 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only) 330 * @irq: IRQ number (MSI and MSI-X only)
333 * @irq_moderation: IRQ moderation value (in hardware ticks) 331 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
352 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 350 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
353 * @n_rx_overlength: Count of RX_OVERLENGTH errors 351 * @n_rx_overlength: Count of RX_OVERLENGTH errors
354 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 352 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
353 * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
354 * @tx_stop_count: Core TX queue stop count
355 * @tx_stop_lock: Core TX queue stop lock
355 */ 356 */
356struct efx_channel { 357struct efx_channel {
357 struct efx_nic *efx; 358 struct efx_nic *efx;
358 int channel; 359 int channel;
359 char name[IFNAMSIZ + 6]; 360 char name[IFNAMSIZ + 6];
360 int used_flags;
361 bool enabled; 361 bool enabled;
362 int irq; 362 int irq;
363 unsigned int irq_moderation; 363 unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
389 struct efx_rx_buffer *rx_pkt; 389 struct efx_rx_buffer *rx_pkt;
390 bool rx_pkt_csummed; 390 bool rx_pkt_csummed;
391 391
392 struct efx_tx_queue *tx_queue;
393 atomic_t tx_stop_count;
394 spinlock_t tx_stop_lock;
392}; 395};
393 396
394enum efx_led_mode { 397enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
661 * @rx_queue: RX DMA queues 664 * @rx_queue: RX DMA queues
662 * @channel: Channels 665 * @channel: Channels
663 * @next_buffer_table: First available buffer table id 666 * @next_buffer_table: First available buffer table id
664 * @n_rx_queues: Number of RX queues
665 * @n_channels: Number of channels in use 667 * @n_channels: Number of channels in use
668 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
669 * @n_tx_channels: Number of channels used for TX
666 * @rx_buffer_len: RX buffer length 670 * @rx_buffer_len: RX buffer length
667 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 671 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
668 * @int_error_count: Number of internal errors seen recently 672 * @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
672 * This register is written with the SMP processor ID whenever an 676 * This register is written with the SMP processor ID whenever an
673 * interrupt is handled. It is used by efx_nic_test_interrupt() 677 * interrupt is handled. It is used by efx_nic_test_interrupt()
674 * to verify that an interrupt has occurred. 678 * to verify that an interrupt has occurred.
679 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
680 * @fatal_irq_level: IRQ level (bit number) used for serious errors
675 * @spi_flash: SPI flash device 681 * @spi_flash: SPI flash device
676 * This field will be %NULL if no flash device is present (or for Siena). 682 * This field will be %NULL if no flash device is present (or for Siena).
677 * @spi_eeprom: SPI EEPROM device 683 * @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
691 * @port_initialized: Port initialized? 697 * @port_initialized: Port initialized?
692 * @net_dev: Operating system network device. Consider holding the rtnl lock 698 * @net_dev: Operating system network device. Consider holding the rtnl lock
693 * @rx_checksum_enabled: RX checksumming enabled 699 * @rx_checksum_enabled: RX checksumming enabled
694 * @netif_stop_count: Port stop count
695 * @netif_stop_lock: Port stop lock
696 * @mac_stats: MAC statistics. These include all statistics the MACs 700 * @mac_stats: MAC statistics. These include all statistics the MACs
697 * can provide. Generic code converts these into a standard 701 * can provide. Generic code converts these into a standard
698 * &struct net_device_stats. 702 * &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
740 enum nic_state state; 744 enum nic_state state;
741 enum reset_type reset_pending; 745 enum reset_type reset_pending;
742 746
743 struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT]; 747 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
744 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 748 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
745 struct efx_channel channel[EFX_MAX_CHANNELS]; 749 struct efx_channel channel[EFX_MAX_CHANNELS];
746 750
747 unsigned next_buffer_table; 751 unsigned next_buffer_table;
748 int n_rx_queues; 752 unsigned n_channels;
749 int n_channels; 753 unsigned n_rx_channels;
754 unsigned n_tx_channels;
750 unsigned int rx_buffer_len; 755 unsigned int rx_buffer_len;
751 unsigned int rx_buffer_order; 756 unsigned int rx_buffer_order;
752 757
@@ -755,7 +760,8 @@ struct efx_nic {
755 760
756 struct efx_buffer irq_status; 761 struct efx_buffer irq_status;
757 volatile signed int last_irq_cpu; 762 volatile signed int last_irq_cpu;
758 unsigned long irq_zero_count; 763 unsigned irq_zero_count;
764 unsigned fatal_irq_level;
759 765
760 struct efx_spi_device *spi_flash; 766 struct efx_spi_device *spi_flash;
761 struct efx_spi_device *spi_eeprom; 767 struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
777 struct net_device *net_dev; 783 struct net_device *net_dev;
778 bool rx_checksum_enabled; 784 bool rx_checksum_enabled;
779 785
780 atomic_t netif_stop_count;
781 spinlock_t netif_stop_lock;
782
783 struct efx_mac_stats mac_stats; 786 struct efx_mac_stats mac_stats;
784 struct efx_buffer stats_buffer; 787 struct efx_buffer stats_buffer;
785 spinlock_t stats_lock; 788 spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
924 927
925/* Iterate over all used channels */ 928/* Iterate over all used channels */
926#define efx_for_each_channel(_channel, _efx) \ 929#define efx_for_each_channel(_channel, _efx) \
927 for (_channel = &_efx->channel[0]; \ 930 for (_channel = &((_efx)->channel[0]); \
928 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \ 931 _channel < &((_efx)->channel[(efx)->n_channels]); \
929 _channel++) \ 932 _channel++)
930 if (!_channel->used_flags) \
931 continue; \
932 else
933 933
934/* Iterate over all used TX queues */ 934/* Iterate over all used TX queues */
935#define efx_for_each_tx_queue(_tx_queue, _efx) \ 935#define efx_for_each_tx_queue(_tx_queue, _efx) \
936 for (_tx_queue = &_efx->tx_queue[0]; \ 936 for (_tx_queue = &((_efx)->tx_queue[0]); \
937 _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 937 _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
938 (_efx)->n_tx_channels]); \
938 _tx_queue++) 939 _tx_queue++)
939 940
940/* Iterate over all TX queues belonging to a channel */ 941/* Iterate over all TX queues belonging to a channel */
941#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 942#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
942 for (_tx_queue = &_channel->efx->tx_queue[0]; \ 943 for (_tx_queue = (_channel)->tx_queue; \
943 _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 944 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
944 _tx_queue++) \ 945 _tx_queue++)
945 if (_tx_queue->channel != _channel) \
946 continue; \
947 else
948 946
949/* Iterate over all used RX queues */ 947/* Iterate over all used RX queues */
950#define efx_for_each_rx_queue(_rx_queue, _efx) \ 948#define efx_for_each_rx_queue(_rx_queue, _efx) \
951 for (_rx_queue = &_efx->rx_queue[0]; \ 949 for (_rx_queue = &((_efx)->rx_queue[0]); \
952 _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \ 950 _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
953 _rx_queue++) 951 _rx_queue++)
954 952
955/* Iterate over all RX queues belonging to a channel */ 953/* Iterate over all RX queues belonging to a channel */
956#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 954#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
957 for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \ 955 for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
958 _rx_queue; \ 956 _rx_queue; \
959 _rx_queue = NULL) \ 957 _rx_queue = NULL) \
960 if (_rx_queue->channel != _channel) \ 958 if (_rx_queue->channel != (_channel)) \
961 continue; \ 959 continue; \
962 else 960 else
963 961
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e348307..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
418 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 418 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
419 419
420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
421 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 421 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
424 !csum); 424 !csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
431 efx_oword_t reg; 431 efx_oword_t reg;
432 432
433 /* Only 128 bits in this register */ 433 /* Only 128 bits in this register */
434 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 434 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
435 435
436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); 436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
437 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 437 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
438 clear_bit_le(tx_queue->queue, (void *)&reg); 438 clear_bit_le(tx_queue->queue, (void *)&reg);
439 else 439 else
440 set_bit_le(tx_queue->queue, (void *)&reg); 440 set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
654 * The NIC batches TX completion events; the message we receive is of 654 * The NIC batches TX completion events; the message we receive is of
655 * the form "complete all TX events up to this index". 655 * the form "complete all TX events up to this index".
656 */ 656 */
657static void 657static int
658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
659{ 659{
660 unsigned int tx_ev_desc_ptr; 660 unsigned int tx_ev_desc_ptr;
661 unsigned int tx_ev_q_label; 661 unsigned int tx_ev_q_label;
662 struct efx_tx_queue *tx_queue; 662 struct efx_tx_queue *tx_queue;
663 struct efx_nic *efx = channel->efx; 663 struct efx_nic *efx = channel->efx;
664 int tx_packets = 0;
664 665
665 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 666 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
666 /* Transmit completion */ 667 /* Transmit completion */
667 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 668 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
668 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 669 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
669 tx_queue = &efx->tx_queue[tx_ev_q_label]; 670 tx_queue = &efx->tx_queue[tx_ev_q_label];
670 channel->irq_mod_score += 671 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
671 (tx_ev_desc_ptr - tx_queue->read_count) & 672 EFX_TXQ_MASK);
672 EFX_TXQ_MASK; 673 channel->irq_mod_score += tx_packets;
673 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 674 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
674 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 675 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
675 /* Rewrite the FIFO write pointer */ 676 /* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
689 EFX_QWORD_FMT"\n", channel->channel, 690 EFX_QWORD_FMT"\n", channel->channel,
690 EFX_QWORD_VAL(*event)); 691 EFX_QWORD_VAL(*event));
691 } 692 }
693
694 return tx_packets;
692} 695}
693 696
694/* Detect errors included in the rx_evt_pkt_ok bit. */ 697/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
947 } 950 }
948} 951}
949 952
950int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota) 953int efx_nic_process_eventq(struct efx_channel *channel, int budget)
951{ 954{
952 unsigned int read_ptr; 955 unsigned int read_ptr;
953 efx_qword_t event, *p_event; 956 efx_qword_t event, *p_event;
954 int ev_code; 957 int ev_code;
955 int rx_packets = 0; 958 int tx_packets = 0;
959 int spent = 0;
956 960
957 read_ptr = channel->eventq_read_ptr; 961 read_ptr = channel->eventq_read_ptr;
958 962
959 do { 963 for (;;) {
960 p_event = efx_event(channel, read_ptr); 964 p_event = efx_event(channel, read_ptr);
961 event = *p_event; 965 event = *p_event;
962 966
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
970 /* Clear this event by marking it all ones */ 974 /* Clear this event by marking it all ones */
971 EFX_SET_QWORD(*p_event); 975 EFX_SET_QWORD(*p_event);
972 976
977 /* Increment read pointer */
978 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
979
973 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 980 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
974 981
975 switch (ev_code) { 982 switch (ev_code) {
976 case FSE_AZ_EV_CODE_RX_EV: 983 case FSE_AZ_EV_CODE_RX_EV:
977 efx_handle_rx_event(channel, &event); 984 efx_handle_rx_event(channel, &event);
978 ++rx_packets; 985 if (++spent == budget)
986 goto out;
979 break; 987 break;
980 case FSE_AZ_EV_CODE_TX_EV: 988 case FSE_AZ_EV_CODE_TX_EV:
981 efx_handle_tx_event(channel, &event); 989 tx_packets += efx_handle_tx_event(channel, &event);
990 if (tx_packets >= EFX_TXQ_SIZE) {
991 spent = budget;
992 goto out;
993 }
982 break; 994 break;
983 case FSE_AZ_EV_CODE_DRV_GEN_EV: 995 case FSE_AZ_EV_CODE_DRV_GEN_EV:
984 channel->eventq_magic = EFX_QWORD_FIELD( 996 channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
1001 " (data " EFX_QWORD_FMT ")\n", channel->channel, 1013 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1002 ev_code, EFX_QWORD_VAL(event)); 1014 ev_code, EFX_QWORD_VAL(event));
1003 } 1015 }
1016 }
1004 1017
1005 /* Increment read pointer */ 1018out:
1006 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1007
1008 } while (rx_packets < rx_quota);
1009
1010 channel->eventq_read_ptr = read_ptr; 1019 channel->eventq_read_ptr = read_ptr;
1011 return rx_packets; 1020 return spent;
1012} 1021}
1013 1022
1014 1023
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1123 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { 1132 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1124 ev_queue = EFX_QWORD_FIELD(*event, 1133 ev_queue = EFX_QWORD_FIELD(*event,
1125 FSF_AZ_DRIVER_EV_SUBDATA); 1134 FSF_AZ_DRIVER_EV_SUBDATA);
1126 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1135 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1127 tx_queue = efx->tx_queue + ev_queue; 1136 tx_queue = efx->tx_queue + ev_queue;
1128 tx_queue->flushed = FLUSH_DONE; 1137 tx_queue->flushed = FLUSH_DONE;
1129 } 1138 }
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1133 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1142 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1134 ev_failed = EFX_QWORD_FIELD( 1143 ev_failed = EFX_QWORD_FIELD(
1135 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1144 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1136 if (ev_queue < efx->n_rx_queues) { 1145 if (ev_queue < efx->n_rx_channels) {
1137 rx_queue = efx->rx_queue + ev_queue; 1146 rx_queue = efx->rx_queue + ev_queue;
1138 rx_queue->flushed = 1147 rx_queue->flushed =
1139 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1148 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1229 bool enabled, bool force) 1238 bool enabled, bool force)
1230{ 1239{
1231 efx_oword_t int_en_reg_ker; 1240 efx_oword_t int_en_reg_ker;
1232 unsigned int level = 0;
1233
1234 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1235 /* Set the level always even if we're generating a test
1236 * interrupt, because our legacy interrupt handler is safe */
1237 level = 0x1f;
1238 1241
1239 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1242 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1240 FRF_AZ_KER_INT_LEVE_SEL, level, 1243 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1241 FRF_AZ_KER_INT_KER, force, 1244 FRF_AZ_KER_INT_KER, force,
1242 FRF_AZ_DRV_INT_EN_KER, enabled); 1245 FRF_AZ_DRV_INT_EN_KER, enabled);
1243 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1246 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1291 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1294 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1292 EFX_OWORD_VAL(fatal_intr), 1295 EFX_OWORD_VAL(fatal_intr),
1293 error ? "disabling bus mastering" : "no recognised error"); 1296 error ? "disabling bus mastering" : "no recognised error");
1294 if (error == 0)
1295 goto out;
1296 1297
1297 /* If this is a memory parity error dump which blocks are offending */ 1298 /* If this is a memory parity error dump which blocks are offending */
1298 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER); 1299 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1300 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1299 if (mem_perr) { 1301 if (mem_perr) {
1300 efx_oword_t reg; 1302 efx_oword_t reg;
1301 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1303 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1324 "NIC will be disabled\n"); 1326 "NIC will be disabled\n");
1325 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1327 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1326 } 1328 }
1327out: 1329
1328 return IRQ_HANDLED; 1330 return IRQ_HANDLED;
1329} 1331}
1330 1332
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1346 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1348 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1347 1349
1348 /* Check to see if we have a serious error condition */ 1350 /* Check to see if we have a serious error condition */
1349 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1351 if (queues & (1U << efx->fatal_irq_level)) {
1350 if (unlikely(syserr)) 1352 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1351 return efx_nic_fatal_interrupt(efx); 1353 if (unlikely(syserr))
1354 return efx_nic_fatal_interrupt(efx);
1355 }
1352 1356
1353 if (queues != 0) { 1357 if (queues != 0) {
1354 if (EFX_WORKAROUND_15783(efx)) 1358 if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1362 } 1366 }
1363 result = IRQ_HANDLED; 1367 result = IRQ_HANDLED;
1364 1368
1365 } else if (EFX_WORKAROUND_15783(efx) && 1369 } else if (EFX_WORKAROUND_15783(efx)) {
1366 efx->irq_zero_count++ == 0) {
1367 efx_qword_t *event; 1370 efx_qword_t *event;
1368 1371
1369 /* Ensure we rearm all event queues */ 1372 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1373 * because this might be a shared interrupt. */
1374 if (efx->irq_zero_count++ == 0)
1375 result = IRQ_HANDLED;
1376
1377 /* Ensure we schedule or rearm all event queues */
1370 efx_for_each_channel(channel, efx) { 1378 efx_for_each_channel(channel, efx) {
1371 event = efx_event(channel, channel->eventq_read_ptr); 1379 event = efx_event(channel, channel->eventq_read_ptr);
1372 if (efx_event_present(event)) 1380 if (efx_event_present(event))
1373 efx_schedule_channel(channel); 1381 efx_schedule_channel(channel);
1382 else
1383 efx_nic_eventq_read_ack(channel);
1374 } 1384 }
1375
1376 result = IRQ_HANDLED;
1377 } 1385 }
1378 1386
1379 if (result == IRQ_HANDLED) { 1387 if (result == IRQ_HANDLED) {
1380 efx->last_irq_cpu = raw_smp_processor_id(); 1388 efx->last_irq_cpu = raw_smp_processor_id();
1381 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1389 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1382 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1390 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1383 } else if (EFX_WORKAROUND_15783(efx)) {
1384 /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
1385 * because this might be a shared interrupt, but we do need to
1386 * check the channel every time and preemptively rearm it if
1387 * it's idle. */
1388 efx_for_each_channel(channel, efx) {
1389 if (!channel->work_pending)
1390 efx_nic_eventq_read_ack(channel);
1391 }
1392 } 1391 }
1393 1392
1394 return result; 1393 return result;
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1413 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1412 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1414 1413
1415 /* Check to see if we have a serious error condition */ 1414 /* Check to see if we have a serious error condition */
1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1415 if (channel->channel == efx->fatal_irq_level) {
1417 if (unlikely(syserr)) 1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1418 return efx_nic_fatal_interrupt(efx); 1417 if (unlikely(syserr))
1418 return efx_nic_fatal_interrupt(efx);
1419 }
1419 1420
1420 /* Schedule processing of the channel */ 1421 /* Schedule processing of the channel */
1421 efx_schedule_channel(channel); 1422 efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
1440 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; 1441 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1441 offset += 0x10) { 1442 offset += 0x10) {
1442 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1443 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1443 i % efx->n_rx_queues); 1444 i % efx->n_rx_channels);
1444 efx_writed(efx, &dword, offset); 1445 efx_writed(efx, &dword, offset);
1445 i++; 1446 i++;
1446 } 1447 }
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
1553 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1554 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1554 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1555 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1555 1556
1557 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1558 /* Use an interrupt level unused by event queues */
1559 efx->fatal_irq_level = 0x1f;
1560 else
1561 /* Use a valid MSI-X vector */
1562 efx->fatal_irq_level = 0;
1563
1556 /* Enable all the genuinely fatal interrupts. (They are still 1564 /* Enable all the genuinely fatal interrupts. (They are still
1557 * masked by the overall interrupt mask, controlled by 1565 * masked by the overall interrupt mask, controlled by
1558 * falcon_interrupts()). 1566 * falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
1563 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1571 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1564 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1572 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1565 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1573 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1574 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1575 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1566 EFX_INVERT_OWORD(temp); 1576 EFX_INVERT_OWORD(temp);
1567 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1577 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1568 1578
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 135 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 136 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 137 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
138 */ 139 */
139struct siena_nic_data { 140struct siena_nic_data {
140 u64 fw_version; 141 u64 fw_version;
141 u32 fw_build; 142 u32 fw_build;
142 struct efx_mcdi_iface mcdi; 143 struct efx_mcdi_iface mcdi;
143 int wol_filter_id; 144 int wol_filter_id;
145 u8 ipv6_rss_key[40];
144}; 146};
145 147
146extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); 148extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -156,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type;
156 ************************************************************************** 158 **************************************************************************
157 */ 159 */
158 160
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 161extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 162
161/* TX data path */ 163/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 164extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
203extern int efx_nic_flush_queues(struct efx_nic *efx); 205extern int efx_nic_flush_queues(struct efx_nic *efx);
204extern void falcon_start_nic_stats(struct efx_nic *efx); 206extern void falcon_start_nic_stats(struct efx_nic *efx);
205extern void falcon_stop_nic_stats(struct efx_nic *efx); 207extern void falcon_stop_nic_stats(struct efx_nic *efx);
208extern void falcon_setup_xaui(struct efx_nic *efx);
206extern int falcon_reset_xaui(struct efx_nic *efx); 209extern int falcon_reset_xaui(struct efx_nic *efx);
207extern void efx_nic_init_common(struct efx_nic *efx); 210extern void efx_nic_init_common(struct efx_nic *efx);
208 211
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae2..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
616 goto out; 616 goto out;
617 } 617 }
618 618
619 /* Test every TX queue */ 619 /* Test both types of TX queue */
620 efx_for_each_tx_queue(tx_queue, efx) { 620 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
621 state->offload_csum = (tx_queue->queue == 621 state->offload_csum = (tx_queue->queue &
622 EFX_TX_QUEUE_OFFLOAD_CSUM); 622 EFX_TXQ_TYPE_OFFLOAD);
623 rc = efx_test_loopback(tx_queue, 623 rc = efx_test_loopback(tx_queue,
624 &tests->loopback[mode]); 624 &tests->loopback[mode]);
625 if (rc) 625 if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_TX_QUEUE_COUNT]; 21 int tx_sent[EFX_TXQ_TYPES];
22 int tx_done[EFX_TX_QUEUE_COUNT]; 22 int tx_done[EFX_TXQ_TYPES];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 38dcc42c4f79..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/random.h>
16#include "net_driver.h" 17#include "net_driver.h"
17#include "bitfield.h" 18#include "bitfield.h"
18#include "efx.h" 19#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
274 goto fail5; 275 goto fail5;
275 } 276 }
276 277
278 get_random_bytes(&nic_data->ipv6_rss_key,
279 sizeof(nic_data->ipv6_rss_key));
280
277 return 0; 281 return 0;
278 282
279fail5: 283fail5:
@@ -293,6 +297,7 @@ fail1:
293 */ 297 */
294static int siena_init_nic(struct efx_nic *efx) 298static int siena_init_nic(struct efx_nic *efx)
295{ 299{
300 struct siena_nic_data *nic_data = efx->nic_data;
296 efx_oword_t temp; 301 efx_oword_t temp;
297 int rc; 302 int rc;
298 303
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
319 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); 324 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
320 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 325 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
321 326
327 /* Enable IPv6 RSS */
328 BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
329 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
330 FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
331 memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
332 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
333 memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
334 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
335 EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
336 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
337 memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
338 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
339 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
340
322 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) 341 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
323 /* No MCDI operation has been defined to set thresholds */ 342 /* No MCDI operation has been defined to set thresholds */
324 EFX_ERR(efx, "ignoring RX flow control thresholds\n"); 343 EFX_ERR(efx, "ignoring RX flow control thresholds\n");
@@ -456,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
456 475
457static void siena_update_nic_stats(struct efx_nic *efx) 476static void siena_update_nic_stats(struct efx_nic *efx)
458{ 477{
459 while (siena_try_update_nic_stats(efx) == -EAGAIN) 478 int retry;
460 cpu_relax(); 479
 480 /* If we're unlucky enough to read statistics during the DMA, wait
481 * up to 10ms for it to finish (typically takes <500us) */
482 for (retry = 0; retry < 100; ++retry) {
483 if (siena_try_update_nic_stats(efx) == 0)
484 return;
485 udelay(100);
486 }
487
488 /* Use the old values instead */
461} 489}
462 490
463static void siena_start_nic_stats(struct efx_nic *efx) 491static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
30 */ 30 */
31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) 31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
32 32
33/* We want to be able to nest calls to netif_stop_queue(), since each 33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * channel can have an individual stop on the queue. 34 * because of the 2 hardware queues associated with each core queue,
35 */ 35 * but also so that we can inhibit TX for reasons other than a full
36void efx_stop_queue(struct efx_nic *efx) 36 * hardware queue. */
37void efx_stop_queue(struct efx_channel *channel)
37{ 38{
38 spin_lock_bh(&efx->netif_stop_lock); 39 struct efx_nic *efx = channel->efx;
40
41 if (!channel->tx_queue)
42 return;
43
44 spin_lock_bh(&channel->tx_stop_lock);
39 EFX_TRACE(efx, "stop TX queue\n"); 45 EFX_TRACE(efx, "stop TX queue\n");
40 46
41 atomic_inc(&efx->netif_stop_count); 47 atomic_inc(&channel->tx_stop_count);
42 netif_stop_queue(efx->net_dev); 48 netif_tx_stop_queue(
49 netdev_get_tx_queue(
50 efx->net_dev,
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
43 52
44 spin_unlock_bh(&efx->netif_stop_lock); 53 spin_unlock_bh(&channel->tx_stop_lock);
45} 54}
46 55
47/* Wake netif's TX queue 56/* Decrement core TX queue stop count and wake it if the count is 0 */
48 * We want to be able to nest calls to netif_stop_queue(), since each 57void efx_wake_queue(struct efx_channel *channel)
49 * channel can have an individual stop on the queue.
50 */
51void efx_wake_queue(struct efx_nic *efx)
52{ 58{
59 struct efx_nic *efx = channel->efx;
60
61 if (!channel->tx_queue)
62 return;
63
53 local_bh_disable(); 64 local_bh_disable();
54 if (atomic_dec_and_lock(&efx->netif_stop_count, 65 if (atomic_dec_and_lock(&channel->tx_stop_count,
55 &efx->netif_stop_lock)) { 66 &channel->tx_stop_lock)) {
56 EFX_TRACE(efx, "waking TX queue\n"); 67 EFX_TRACE(efx, "waking TX queue\n");
57 netif_wake_queue(efx->net_dev); 68 netif_tx_wake_queue(
58 spin_unlock(&efx->netif_stop_lock); 69 netdev_get_tx_queue(
70 efx->net_dev,
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock);
59 } 73 }
60 local_bh_enable(); 74 local_bh_enable();
61} 75}
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
298 rc = NETDEV_TX_BUSY; 312 rc = NETDEV_TX_BUSY;
299 313
300 if (tx_queue->stopped == 1) 314 if (tx_queue->stopped == 1)
301 efx_stop_queue(efx); 315 efx_stop_queue(tx_queue->channel);
302 316
303 unwind: 317 unwind:
304 /* Work backwards until we hit the original insert pointer value */ 318 /* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
374 if (unlikely(efx->port_inhibited)) 388 if (unlikely(efx->port_inhibited))
375 return NETDEV_TX_BUSY; 389 return NETDEV_TX_BUSY;
376 390
391 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
377 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) 392 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
378 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM]; 393 tx_queue += EFX_TXQ_TYPE_OFFLOAD;
379 else
380 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
381 394
382 return efx_enqueue_skb(tx_queue, skb); 395 return efx_enqueue_skb(tx_queue, skb);
383} 396}
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
405 netif_tx_lock(efx->net_dev); 418 netif_tx_lock(efx->net_dev);
406 if (tx_queue->stopped) { 419 if (tx_queue->stopped) {
407 tx_queue->stopped = 0; 420 tx_queue->stopped = 0;
408 efx_wake_queue(efx); 421 efx_wake_queue(tx_queue->channel);
409 } 422 }
410 netif_tx_unlock(efx->net_dev); 423 netif_tx_unlock(efx->net_dev);
411 } 424 }
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
488 /* Release queue's stop on port, if any */ 501 /* Release queue's stop on port, if any */
489 if (tx_queue->stopped) { 502 if (tx_queue->stopped) {
490 tx_queue->stopped = 0; 503 tx_queue->stopped = 0;
491 efx_wake_queue(tx_queue->efx); 504 efx_wake_queue(tx_queue->channel);
492 } 505 }
493} 506}
494 507
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1120 1133
1121 /* Stop the queue if it wasn't stopped before. */ 1134 /* Stop the queue if it wasn't stopped before. */
1122 if (tx_queue->stopped == 1) 1135 if (tx_queue->stopped == 1)
1123 efx_stop_queue(efx); 1136 efx_stop_queue(tx_queue->channel);
1124 1137
1125 unwind: 1138 unwind:
1126 /* Free the DMA mapping we were in the process of writing out */ 1139 /* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e483..518f7fc91473 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
37/* Truncated IPv4 packets can confuse the TX packet parser */ 37/* Truncated IPv4 packets can confuse the TX packet parser */
38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB 38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
39/* Legacy ISR read can return zero once */ 39/* Legacy ISR read can return zero once */
40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
41/* Legacy interrupt storm when interrupt fifo fills */ 41/* Legacy interrupt storm when interrupt fifo fills */
42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
43 43
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3a086d3a7cbf..bf9c05be347b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -70,18 +70,15 @@
70 VLAN:GSO + CKSUM + Data + skb_frags * DMA */ 70 VLAN:GSO + CKSUM + Data + skb_frags * DMA */
71#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) 71#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
72#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) 72#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
73#define TX_MAX_PENDING 4096 73#define TX_MAX_PENDING 1024
74#define TX_DEF_PENDING 127 74#define TX_DEF_PENDING 127
75 75
76#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
77#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
78#define TX_WATCHDOG (5 * HZ) 76#define TX_WATCHDOG (5 * HZ)
79#define NAPI_WEIGHT 64 77#define NAPI_WEIGHT 64
80#define PHY_RETRIES 1000 78#define PHY_RETRIES 1000
81 79
82#define SKY2_EEPROM_MAGIC 0x9955aabb 80#define SKY2_EEPROM_MAGIC 0x9955aabb
83 81
84
85#define RING_NEXT(x,s) (((x)+1) & ((s)-1)) 82#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
86 83
87static const u32 default_msg = 84static const u32 default_msg =
@@ -1132,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1132 if (pci_dma_mapping_error(pdev, re->data_addr)) 1129 if (pci_dma_mapping_error(pdev, re->data_addr))
1133 goto mapping_error; 1130 goto mapping_error;
1134 1131
1135 pci_unmap_len_set(re, data_size, size); 1132 dma_unmap_len_set(re, data_size, size);
1136 1133
1137 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1135 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1154,7 +1151,7 @@ map_page_error:
1154 PCI_DMA_FROMDEVICE); 1151 PCI_DMA_FROMDEVICE);
1155 } 1152 }
1156 1153
1157 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size), 1154 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1158 PCI_DMA_FROMDEVICE); 1155 PCI_DMA_FROMDEVICE);
1159 1156
1160mapping_error: 1157mapping_error:
@@ -1169,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1169 struct sk_buff *skb = re->skb; 1166 struct sk_buff *skb = re->skb;
1170 int i; 1167 int i;
1171 1168
1172 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size), 1169 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1173 PCI_DMA_FROMDEVICE); 1170 PCI_DMA_FROMDEVICE);
1174 1171
1175 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1172 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1196,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
1196 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1193 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1197} 1194}
1198 1195
1196/* Enable/disable receive hash calculation (RSS) */
1197static void rx_set_rss(struct net_device *dev)
1198{
1199 struct sky2_port *sky2 = netdev_priv(dev);
1200 struct sky2_hw *hw = sky2->hw;
1201 int i, nkeys = 4;
1202
1203 /* Supports IPv6 and other modes */
1204 if (hw->flags & SKY2_HW_NEW_LE) {
1205 nkeys = 10;
1206 sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
1207 }
1208
1209 /* Program RSS initial values */
1210 if (dev->features & NETIF_F_RXHASH) {
1211 u32 key[nkeys];
1212
1213 get_random_bytes(key, nkeys * sizeof(u32));
1214 for (i = 0; i < nkeys; i++)
1215 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1216 key[i]);
1217
1218 /* Need to turn on (undocumented) flag to make hashing work */
1219 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
1220 RX_STFW_ENA);
1221
1222 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1223 BMU_ENA_RX_RSS_HASH);
1224 } else
1225 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1226 BMU_DIS_RX_RSS_HASH);
1227}
1228
1199/* 1229/*
1200 * The RX Stop command will not work for Yukon-2 if the BMU does not 1230 * The RX Stop command will not work for Yukon-2 if the BMU does not
1201 * reach the end of packet and since we can't make sure that we have 1231 * reach the end of packet and since we can't make sure that we have
@@ -1428,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
1428 if (!(hw->flags & SKY2_HW_NEW_LE)) 1458 if (!(hw->flags & SKY2_HW_NEW_LE))
1429 rx_set_checksum(sky2); 1459 rx_set_checksum(sky2);
1430 1460
1461 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1462 rx_set_rss(sky2->netdev);
1463
1431 /* submit Rx ring */ 1464 /* submit Rx ring */
1432 for (i = 0; i < sky2->rx_pending; i++) { 1465 for (i = 0; i < sky2->rx_pending; i++) {
1433 re = sky2->rx_ring + i; 1466 re = sky2->rx_ring + i;
@@ -1662,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1662static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) 1695static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1663{ 1696{
1664 if (re->flags & TX_MAP_SINGLE) 1697 if (re->flags & TX_MAP_SINGLE)
1665 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1698 pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
1666 pci_unmap_len(re, maplen), 1699 dma_unmap_len(re, maplen),
1667 PCI_DMA_TODEVICE); 1700 PCI_DMA_TODEVICE);
1668 else if (re->flags & TX_MAP_PAGE) 1701 else if (re->flags & TX_MAP_PAGE)
1669 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1702 pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
1670 pci_unmap_len(re, maplen), 1703 dma_unmap_len(re, maplen),
1671 PCI_DMA_TODEVICE); 1704 PCI_DMA_TODEVICE);
1672 re->flags = 0; 1705 re->flags = 0;
1673} 1706}
@@ -1778,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1778 1811
1779 re = sky2->tx_ring + slot; 1812 re = sky2->tx_ring + slot;
1780 re->flags = TX_MAP_SINGLE; 1813 re->flags = TX_MAP_SINGLE;
1781 pci_unmap_addr_set(re, mapaddr, mapping); 1814 dma_unmap_addr_set(re, mapaddr, mapping);
1782 pci_unmap_len_set(re, maplen, len); 1815 dma_unmap_len_set(re, maplen, len);
1783 1816
1784 le = get_tx_le(sky2, &slot); 1817 le = get_tx_le(sky2, &slot);
1785 le->addr = cpu_to_le32(lower_32_bits(mapping)); 1818 le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1807,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1807 1840
1808 re = sky2->tx_ring + slot; 1841 re = sky2->tx_ring + slot;
1809 re->flags = TX_MAP_PAGE; 1842 re->flags = TX_MAP_PAGE;
1810 pci_unmap_addr_set(re, mapaddr, mapping); 1843 dma_unmap_addr_set(re, mapaddr, mapping);
1811 pci_unmap_len_set(re, maplen, frag->size); 1844 dma_unmap_len_set(re, maplen, frag->size);
1812 1845
1813 le = get_tx_le(sky2, &slot); 1846 le = get_tx_le(sky2, &slot);
1814 le->addr = cpu_to_le32(lower_32_bits(mapping)); 1847 le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2537,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2537 } 2570 }
2538} 2571}
2539 2572
2573static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2574{
2575 struct sk_buff *skb;
2576
2577 skb = sky2->rx_ring[sky2->rx_next].skb;
2578 skb->rxhash = le32_to_cpu(status);
2579}
2580
2540/* Process status response ring */ 2581/* Process status response ring */
2541static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) 2582static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2542{ 2583{
@@ -2558,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2558 if (!(opcode & HW_OWNER)) 2599 if (!(opcode & HW_OWNER))
2559 break; 2600 break;
2560 2601
2561 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); 2602 hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
2562 2603
2563 port = le->css & CSS_LINK_BIT; 2604 port = le->css & CSS_LINK_BIT;
2564 dev = hw->dev[port]; 2605 dev = hw->dev[port];
@@ -2609,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2609 sky2_rx_checksum(sky2, status); 2650 sky2_rx_checksum(sky2, status);
2610 break; 2651 break;
2611 2652
2653 case OP_RSS_HASH:
2654 sky2_rx_hash(sky2, status);
2655 break;
2656
2612 case OP_TXINDEXLE: 2657 case OP_TXINDEXLE:
2613 /* TX index reports status for both ports */ 2658 /* TX index reports status for both ports */
2614 sky2_tx_done(hw->dev[0], status & 0xfff); 2659 sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2963,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2963 switch(hw->chip_id) { 3008 switch(hw->chip_id) {
2964 case CHIP_ID_YUKON_XL: 3009 case CHIP_ID_YUKON_XL:
2965 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; 3010 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
3011 if (hw->chip_rev < CHIP_REV_YU_XL_A2)
3012 hw->flags |= SKY2_HW_RSS_BROKEN;
2966 break; 3013 break;
2967 3014
2968 case CHIP_ID_YUKON_EC_U: 3015 case CHIP_ID_YUKON_EC_U:
@@ -2988,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2988 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); 3035 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2989 return -EOPNOTSUPP; 3036 return -EOPNOTSUPP;
2990 } 3037 }
2991 hw->flags = SKY2_HW_GIGABIT; 3038 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
2992 break; 3039 break;
2993 3040
2994 case CHIP_ID_YUKON_FE: 3041 case CHIP_ID_YUKON_FE:
3042 hw->flags = SKY2_HW_RSS_BROKEN;
2995 break; 3043 break;
2996 3044
2997 case CHIP_ID_YUKON_FE_P: 3045 case CHIP_ID_YUKON_FE_P:
@@ -3198,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
3198 for (i = 0; i < hw->ports; i++) 3246 for (i = 0; i < hw->ports; i++)
3199 sky2_gmac_reset(hw, i); 3247 sky2_gmac_reset(hw, i);
3200 3248
3201 memset(hw->st_le, 0, STATUS_LE_BYTES); 3249 memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
3202 hw->st_idx = 0; 3250 hw->st_idx = 0;
3203 3251
3204 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); 3252 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3208,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
3208 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); 3256 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
3209 3257
3210 /* Set the list last index */ 3258 /* Set the list last index */
3211 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); 3259 sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
3212 3260
3213 sky2_write16(hw, STAT_TX_IDX_TH, 10); 3261 sky2_write16(hw, STAT_TX_IDX_TH, 10);
3214 sky2_write8(hw, STAT_FIFO_WM, 16); 3262 sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -4115,6 +4163,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4115 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); 4163 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4116} 4164}
4117 4165
4166static int sky2_set_flags(struct net_device *dev, u32 data)
4167{
4168 struct sky2_port *sky2 = netdev_priv(dev);
4169
4170 if (data & ~ETH_FLAG_RXHASH)
4171 return -EOPNOTSUPP;
4172
4173 if (data & ETH_FLAG_RXHASH) {
4174 if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
4175 return -EINVAL;
4176
4177 dev->features |= NETIF_F_RXHASH;
4178 } else
4179 dev->features &= ~NETIF_F_RXHASH;
4180
4181 rx_set_rss(dev);
4182
4183 return 0;
4184}
4118 4185
4119static const struct ethtool_ops sky2_ethtool_ops = { 4186static const struct ethtool_ops sky2_ethtool_ops = {
4120 .get_settings = sky2_get_settings, 4187 .get_settings = sky2_get_settings,
@@ -4146,6 +4213,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4146 .phys_id = sky2_phys_id, 4213 .phys_id = sky2_phys_id,
4147 .get_sset_count = sky2_get_sset_count, 4214 .get_sset_count = sky2_get_sset_count,
4148 .get_ethtool_stats = sky2_get_ethtool_stats, 4215 .get_ethtool_stats = sky2_get_ethtool_stats,
4216 .set_flags = sky2_set_flags,
4149}; 4217};
4150 4218
4151#ifdef CONFIG_SKY2_DEBUG 4219#ifdef CONFIG_SKY2_DEBUG
@@ -4256,12 +4324,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
4256 napi_disable(&hw->napi); 4324 napi_disable(&hw->napi);
4257 last = sky2_read16(hw, STAT_PUT_IDX); 4325 last = sky2_read16(hw, STAT_PUT_IDX);
4258 4326
4327 seq_printf(seq, "Status ring %u\n", hw->st_size);
4259 if (hw->st_idx == last) 4328 if (hw->st_idx == last)
4260 seq_puts(seq, "Status ring (empty)\n"); 4329 seq_puts(seq, "Status ring (empty)\n");
4261 else { 4330 else {
4262 seq_puts(seq, "Status ring\n"); 4331 seq_puts(seq, "Status ring\n");
4263 for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE; 4332 for (idx = hw->st_idx; idx != last && idx < hw->st_size;
4264 idx = RING_NEXT(idx, STATUS_RING_SIZE)) { 4333 idx = RING_NEXT(idx, hw->st_size)) {
4265 const struct sky2_status_le *le = hw->st_le + idx; 4334 const struct sky2_status_le *le = hw->st_le + idx;
4266 seq_printf(seq, "[%d] %#x %d %#x\n", 4335 seq_printf(seq, "[%d] %#x %d %#x\n",
4267 idx, le->opcode, le->length, le->status); 4336 idx, le->opcode, le->length, le->status);
@@ -4498,6 +4567,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4498 if (highmem) 4567 if (highmem)
4499 dev->features |= NETIF_F_HIGHDMA; 4568 dev->features |= NETIF_F_HIGHDMA;
4500 4569
4570 /* Enable receive hashing unless hardware is known broken */
4571 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4572 dev->features |= NETIF_F_RXHASH;
4573
4501#ifdef SKY2_VLAN_TAG_USED 4574#ifdef SKY2_VLAN_TAG_USED
4502 /* The workaround for FE+ status conflicts with VLAN tag detection. */ 4575 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4503 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && 4576 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4689,15 +4762,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4689 goto err_out_free_hw; 4762 goto err_out_free_hw;
4690 } 4763 }
4691 4764
4692 /* ring for status responses */
4693 hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
4694 if (!hw->st_le)
4695 goto err_out_iounmap;
4696
4697 err = sky2_init(hw); 4765 err = sky2_init(hw);
4698 if (err) 4766 if (err)
4699 goto err_out_iounmap; 4767 goto err_out_iounmap;
4700 4768
4769 /* ring for status responses */
4770 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
4771 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4772 &hw->st_dma);
4773 if (!hw->st_le)
4774 goto err_out_reset;
4775
4701 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", 4776 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4702 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); 4777 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4703 4778
@@ -4771,8 +4846,10 @@ err_out_unregister:
4771err_out_free_netdev: 4846err_out_free_netdev:
4772 free_netdev(dev); 4847 free_netdev(dev);
4773err_out_free_pci: 4848err_out_free_pci:
4849 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4850 hw->st_le, hw->st_dma);
4851err_out_reset:
4774 sky2_write8(hw, B0_CTST, CS_RST_SET); 4852 sky2_write8(hw, B0_CTST, CS_RST_SET);
4775 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4776err_out_iounmap: 4853err_out_iounmap:
4777 iounmap(hw->regs); 4854 iounmap(hw->regs);
4778err_out_free_hw: 4855err_out_free_hw:
@@ -4810,7 +4887,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4810 free_irq(pdev->irq, hw); 4887 free_irq(pdev->irq, hw);
4811 if (hw->flags & SKY2_HW_USE_MSI) 4888 if (hw->flags & SKY2_HW_USE_MSI)
4812 pci_disable_msi(pdev); 4889 pci_disable_msi(pdev);
4813 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 4890 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4891 hw->st_le, hw->st_dma);
4814 pci_release_regions(pdev); 4892 pci_release_regions(pdev);
4815 pci_disable_device(pdev); 4893 pci_disable_device(pdev);
4816 4894
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 0bebfb3638f6..084eff21b67a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -694,8 +694,21 @@ enum {
694 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ 694 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
695 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ 695 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
696 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ 696 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
697
698 RSS_KEY = 0x0220, /* RSS Key setup */
699 RSS_CFG = 0x0248, /* RSS Configuration */
697}; 700};
698 701
702enum {
703 HASH_TCP_IPV6_EX_CTRL = 1<<5,
704 HASH_IPV6_EX_CTRL = 1<<4,
705 HASH_TCP_IPV6_CTRL = 1<<3,
706 HASH_IPV6_CTRL = 1<<2,
707 HASH_TCP_IPV4_CTRL = 1<<1,
708 HASH_IPV4_CTRL = 1<<0,
709
710 HASH_ALL = 0x3f,
711};
699 712
700enum { 713enum {
701 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ 714 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -2169,14 +2182,14 @@ struct tx_ring_info {
2169 unsigned long flags; 2182 unsigned long flags;
2170#define TX_MAP_SINGLE 0x0001 2183#define TX_MAP_SINGLE 0x0001
2171#define TX_MAP_PAGE 0x0002 2184#define TX_MAP_PAGE 0x0002
2172 DECLARE_PCI_UNMAP_ADDR(mapaddr); 2185 DEFINE_DMA_UNMAP_ADDR(mapaddr);
2173 DECLARE_PCI_UNMAP_LEN(maplen); 2186 DEFINE_DMA_UNMAP_LEN(maplen);
2174}; 2187};
2175 2188
2176struct rx_ring_info { 2189struct rx_ring_info {
2177 struct sk_buff *skb; 2190 struct sk_buff *skb;
2178 dma_addr_t data_addr; 2191 dma_addr_t data_addr;
2179 DECLARE_PCI_UNMAP_LEN(data_size); 2192 DEFINE_DMA_UNMAP_LEN(data_size);
2180 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; 2193 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
2181}; 2194};
2182 2195
@@ -2261,6 +2274,7 @@ struct sky2_hw {
2261#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2274#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2262#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2275#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2263#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2276#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2277#define SKY2_HW_RSS_BROKEN 0x00000100
2264 2278
2265 u8 chip_id; 2279 u8 chip_id;
2266 u8 chip_rev; 2280 u8 chip_rev;
@@ -2268,6 +2282,7 @@ struct sky2_hw {
2268 u8 ports; 2282 u8 ports;
2269 2283
2270 struct sky2_status_le *st_le; 2284 struct sky2_status_le *st_le;
2285 u32 st_size;
2271 u32 st_idx; 2286 u32 st_idx;
2272 dma_addr_t st_dma; 2287 dma_addr_t st_dma;
2273 2288
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index e94521cf70a1..d76c8154f52b 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1042,9 +1042,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1042 */ 1042 */
1043 printk("ADDR: %pM\n", dev->dev_addr); 1043 printk("ADDR: %pM\n", dev->dev_addr);
1044 1044
1045 /* set the private data to zero by default */
1046 memset(netdev_priv(dev), 0, sizeof(struct smc_local));
1047
1048 /* Grab the IRQ */ 1045 /* Grab the IRQ */
1049 retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); 1046 retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
1050 if (retval) { 1047 if (retval) {
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 20deb14e98ff..982ff12764a3 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3004,7 +3004,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3004 dev->base_addr = (long) pdev; 3004 dev->base_addr = (long) pdev;
3005 3005
3006 hp = netdev_priv(dev); 3006 hp = netdev_priv(dev);
3007 memset(hp, 0, sizeof(*hp));
3008 3007
3009 hp->happy_dev = pdev; 3008 hp->happy_dev = pdev;
3010 hp->dma_dev = &pdev->dev; 3009 hp->dma_dev = &pdev->dev;
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index e29f495c6a2b..20ab16192325 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -2033,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2033 /************** priv ****************/ 2033 /************** priv ****************/
2034 priv = nic->priv[port] = netdev_priv(ndev); 2034 priv = nic->priv[port] = netdev_priv(ndev);
2035 2035
2036 memset(priv, 0, sizeof(struct bdx_priv));
2037 priv->pBdxRegs = nic->regs + port * 0x8000; 2036 priv->pBdxRegs = nic->regs + port * 0x8000;
2038 priv->port = port; 2037 priv->port = port;
2039 priv->pdev = pdev; 2038 priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 0fea6854c4aa..573054ae7b58 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8700,6 +8700,7 @@ static int tg3_test_msi(struct tg3 *tp)
8700 pci_disable_msi(tp->pdev); 8700 pci_disable_msi(tp->pdev);
8701 8701
8702 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 8702 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8703 tp->napi[0].irq_vec = tp->pdev->irq;
8703 8704
8704 err = tg3_request_irq(tp, 0); 8705 err = tg3_request_irq(tp, 0);
8705 if (err) 8706 if (err)
@@ -12993,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12993 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 12994 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12994 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 12995 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12995 tp->dev->features |= NETIF_F_IPV6_CSUM; 12996 tp->dev->features |= NETIF_F_IPV6_CSUM;
12997 tp->dev->features |= NETIF_F_GRO;
12996 } 12998 }
12997 12999
12998 /* Determine TSO capabilities */ 13000 /* Determine TSO capabilities */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d463..6b150c072a41 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
109 109
110 struct tap_filter txflt; 110 struct tap_filter txflt;
111 struct socket socket; 111 struct socket socket;
112 struct socket_wq wq;
113
114 int vnet_hdr_sz;
112 115
113#ifdef TUN_DEBUG 116#ifdef TUN_DEBUG
114 int debug; 117 int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
323 /* Inform the methods they need to stop using the dev. 326 /* Inform the methods they need to stop using the dev.
324 */ 327 */
325 if (tfile) { 328 if (tfile) {
326 wake_up_all(&tun->socket.wait); 329 wake_up_all(&tun->wq.wait);
327 if (atomic_dec_and_test(&tfile->count)) 330 if (atomic_dec_and_test(&tfile->count))
328 __tun_detach(tun); 331 __tun_detach(tun);
329 } 332 }
@@ -398,7 +401,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
398 /* Notify and wake up reader process */ 401 /* Notify and wake up reader process */
399 if (tun->flags & TUN_FASYNC) 402 if (tun->flags & TUN_FASYNC)
400 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 403 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
401 wake_up_interruptible_poll(&tun->socket.wait, POLLIN | 404 wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
402 POLLRDNORM | POLLRDBAND); 405 POLLRDNORM | POLLRDBAND);
403 return NETDEV_TX_OK; 406 return NETDEV_TX_OK;
404 407
@@ -498,7 +501,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
498 501
499 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 502 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
500 503
501 poll_wait(file, &tun->socket.wait, wait); 504 poll_wait(file, &tun->wq.wait, wait);
502 505
503 if (!skb_queue_empty(&sk->sk_receive_queue)) 506 if (!skb_queue_empty(&sk->sk_receive_queue))
504 mask |= POLLIN | POLLRDNORM; 507 mask |= POLLIN | POLLRDNORM;
@@ -563,7 +566,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
563 } 566 }
564 567
565 if (tun->flags & TUN_VNET_HDR) { 568 if (tun->flags & TUN_VNET_HDR) {
566 if ((len -= sizeof(gso)) > count) 569 if ((len -= tun->vnet_hdr_sz) > count)
567 return -EINVAL; 570 return -EINVAL;
568 571
569 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) 572 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +578,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
575 578
576 if (gso.hdr_len > len) 579 if (gso.hdr_len > len)
577 return -EINVAL; 580 return -EINVAL;
578 offset += sizeof(gso); 581 offset += tun->vnet_hdr_sz;
579 } 582 }
580 583
581 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 584 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +721,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
718 721
719 if (tun->flags & TUN_VNET_HDR) { 722 if (tun->flags & TUN_VNET_HDR) {
720 struct virtio_net_hdr gso = { 0 }; /* no info leak */ 723 struct virtio_net_hdr gso = { 0 }; /* no info leak */
721 if ((len -= sizeof(gso)) < 0) 724 if ((len -= tun->vnet_hdr_sz) < 0)
722 return -EINVAL; 725 return -EINVAL;
723 726
724 if (skb_is_gso(skb)) { 727 if (skb_is_gso(skb)) {
@@ -749,7 +752,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
749 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, 752 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
750 sizeof(gso)))) 753 sizeof(gso))))
751 return -EFAULT; 754 return -EFAULT;
752 total += sizeof(gso); 755 total += tun->vnet_hdr_sz;
753 } 756 }
754 757
755 len = min_t(int, skb->len, len); 758 len = min_t(int, skb->len, len);
@@ -773,7 +776,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
773 776
774 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 777 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
775 778
776 add_wait_queue(&tun->socket.wait, &wait); 779 add_wait_queue(&tun->wq.wait, &wait);
777 while (len) { 780 while (len) {
778 current->state = TASK_INTERRUPTIBLE; 781 current->state = TASK_INTERRUPTIBLE;
779 782
@@ -804,7 +807,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
804 } 807 }
805 808
806 current->state = TASK_RUNNING; 809 current->state = TASK_RUNNING;
807 remove_wait_queue(&tun->socket.wait, &wait); 810 remove_wait_queue(&tun->wq.wait, &wait);
808 811
809 return ret; 812 return ret;
810} 813}
@@ -861,6 +864,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
861static void tun_sock_write_space(struct sock *sk) 864static void tun_sock_write_space(struct sock *sk)
862{ 865{
863 struct tun_struct *tun; 866 struct tun_struct *tun;
867 wait_queue_head_t *wqueue;
864 868
865 if (!sock_writeable(sk)) 869 if (!sock_writeable(sk))
866 return; 870 return;
@@ -868,8 +872,9 @@ static void tun_sock_write_space(struct sock *sk)
868 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 872 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
869 return; 873 return;
870 874
871 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 875 wqueue = sk_sleep(sk);
872 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | 876 if (wqueue && waitqueue_active(wqueue))
877 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
873 POLLWRNORM | POLLWRBAND); 878 POLLWRNORM | POLLWRBAND);
874 879
875 tun = tun_sk(sk)->tun; 880 tun = tun_sk(sk)->tun;
@@ -1033,13 +1038,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1033 tun->dev = dev; 1038 tun->dev = dev;
1034 tun->flags = flags; 1039 tun->flags = flags;
1035 tun->txflt.count = 0; 1040 tun->txflt.count = 0;
1041 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1036 1042
1037 err = -ENOMEM; 1043 err = -ENOMEM;
1038 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto); 1044 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
1039 if (!sk) 1045 if (!sk)
1040 goto err_free_dev; 1046 goto err_free_dev;
1041 1047
1042 init_waitqueue_head(&tun->socket.wait); 1048 tun->socket.wq = &tun->wq;
1049 init_waitqueue_head(&tun->wq.wait);
1043 tun->socket.ops = &tun_socket_ops; 1050 tun->socket.ops = &tun_socket_ops;
1044 sock_init_data(&tun->socket, sk); 1051 sock_init_data(&tun->socket, sk);
1045 sk->sk_write_space = tun_sock_write_space; 1052 sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1181,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1174 struct sock_fprog fprog; 1181 struct sock_fprog fprog;
1175 struct ifreq ifr; 1182 struct ifreq ifr;
1176 int sndbuf; 1183 int sndbuf;
1184 int vnet_hdr_sz;
1177 int ret; 1185 int ret;
1178 1186
1179 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 1187 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1327,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1319 tun->socket.sk->sk_sndbuf = sndbuf; 1327 tun->socket.sk->sk_sndbuf = sndbuf;
1320 break; 1328 break;
1321 1329
1330 case TUNGETVNETHDRSZ:
1331 vnet_hdr_sz = tun->vnet_hdr_sz;
1332 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
1333 ret = -EFAULT;
1334 break;
1335
1336 case TUNSETVNETHDRSZ:
1337 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
1338 ret = -EFAULT;
1339 break;
1340 }
1341 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
1342 ret = -EINVAL;
1343 break;
1344 }
1345
1346 tun->vnet_hdr_sz = vnet_hdr_sz;
1347 break;
1348
1322 case TUNATTACHFILTER: 1349 case TUNATTACHFILTER:
1323 /* Can be set only for TAPs */ 1350 /* Can be set only for TAPs */
1324 ret = -EINVAL; 1351 ret = -EINVAL;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ba56ce4382d9..d7b7018a1de1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,4 +385,25 @@ config USB_CDC_PHONET
385 cellular modem, as found on most Nokia handsets with the 385 cellular modem, as found on most Nokia handsets with the
386 "PC suite" USB profile. 386 "PC suite" USB profile.
387 387
388config USB_IPHETH
389 tristate "Apple iPhone USB Ethernet driver"
390 default n
391 ---help---
392 Module used to share Internet connection (tethering) from your
393 iPhone (Original, 3G and 3GS) to your system.
394 Note that you need userspace libraries and programs that are needed
395 to pair your device with your system and that understand the iPhone
396 protocol.
397
398 For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
399
400config USB_SIERRA_NET
401 tristate "USB-to-WWAN Driver for Sierra Wireless modems"
402 depends on USB_USBNET
403 help
404 Choose this option if you have a Sierra Wireless USB-to-WWAN device.
405
406 To compile this driver as a module, choose M here: the
407 module will be called sierra_net.
408
388endmenu 409endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 82ea62955b56..b13a279663ba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
26 28
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c8cdb7f30adc..b3fe0de40469 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
64 64
65#endif 65#endif
66 66
67static const u8 mbm_guid[16] = {
68 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
70};
71
67/* 72/*
68 * probes control interface, claims data interface, collects the bulk 73 * probes control interface, claims data interface, collects the bulk
69 * endpoints, activates data interface (if needed), maybe sets MTU. 74 * endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
79 int status; 84 int status;
80 int rndis; 85 int rndis;
81 struct usb_driver *driver = driver_of(intf); 86 struct usb_driver *driver = driver_of(intf);
87 struct usb_cdc_mdlm_desc *desc = NULL;
88 struct usb_cdc_mdlm_detail_desc *detail = NULL;
82 89
83 if (sizeof dev->data < sizeof *info) 90 if (sizeof dev->data < sizeof *info)
84 return -EDOM; 91 return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
229 * side link address we were given. 236 * side link address we were given.
230 */ 237 */
231 break; 238 break;
239 case USB_CDC_MDLM_TYPE:
240 if (desc) {
241 dev_dbg(&intf->dev, "extra MDLM descriptor\n");
242 goto bad_desc;
243 }
244
245 desc = (void *)buf;
246
247 if (desc->bLength != sizeof(*desc))
248 goto bad_desc;
249
250 if (memcmp(&desc->bGUID, mbm_guid, 16))
251 goto bad_desc;
252 break;
253 case USB_CDC_MDLM_DETAIL_TYPE:
254 if (detail) {
255 dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
256 goto bad_desc;
257 }
258
259 detail = (void *)buf;
260
261 if (detail->bGuidDescriptorType == 0) {
262 if (detail->bLength < (sizeof(*detail) + 1))
263 goto bad_desc;
264 } else
265 goto bad_desc;
266 break;
232 } 267 }
233next_desc: 268next_desc:
234 len -= buf [0]; /* bLength */ 269 len -= buf [0]; /* bLength */
@@ -431,6 +466,7 @@ static const struct driver_info mbm_info = {
431 .bind = cdc_bind, 466 .bind = cdc_bind,
432 .unbind = usbnet_cdc_unbind, 467 .unbind = usbnet_cdc_unbind,
433 .status = cdc_status, 468 .status = cdc_status,
469 .manage_power = cdc_manage_power,
434}; 470};
435 471
436/*-------------------------------------------------------------------------*/ 472/*-------------------------------------------------------------------------*/
@@ -542,80 +578,10 @@ static const struct usb_device_id products [] = {
542 USB_CDC_PROTO_NONE), 578 USB_CDC_PROTO_NONE),
543 .driver_info = (unsigned long) &cdc_info, 579 .driver_info = (unsigned long) &cdc_info,
544}, { 580}, {
545 /* Ericsson F3507g */ 581 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
546 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, 582 USB_CDC_PROTO_NONE),
547 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 583 .driver_info = (unsigned long)&mbm_info,
548 .driver_info = (unsigned long) &mbm_info, 584
549}, {
550 /* Ericsson F3507g ver. 2 */
551 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
553 .driver_info = (unsigned long) &mbm_info,
554}, {
555 /* Ericsson F3607gw */
556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
558 .driver_info = (unsigned long) &mbm_info,
559}, {
560 /* Ericsson F3607gw ver 2 */
561 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
563 .driver_info = (unsigned long) &mbm_info,
564}, {
565 /* Ericsson F3607gw ver 3 */
566 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
568 .driver_info = (unsigned long) &mbm_info,
569}, {
570 /* Ericsson F3307 */
571 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
572 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
573 .driver_info = (unsigned long) &mbm_info,
574}, {
575 /* Ericsson F3307 ver 2 */
576 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
577 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
578 .driver_info = (unsigned long) &mbm_info,
579}, {
580 /* Ericsson C3607w */
581 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
582 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
583 .driver_info = (unsigned long) &mbm_info,
584}, {
585 /* Ericsson C3607w ver 2 */
586 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
587 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
588 .driver_info = (unsigned long) &mbm_info,
589}, {
590 /* Toshiba F3507g */
591 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
592 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
593 .driver_info = (unsigned long) &mbm_info,
594}, {
595 /* Toshiba F3607gw */
596 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
597 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
598 .driver_info = (unsigned long) &mbm_info,
599}, {
600 /* Toshiba F3607gw ver 2 */
601 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
602 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
603 .driver_info = (unsigned long) &mbm_info,
604}, {
605 /* Dell F3507g */
606 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
607 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
608 .driver_info = (unsigned long) &mbm_info,
609}, {
610 /* Dell F3607gw */
611 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
612 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
613 .driver_info = (unsigned long) &mbm_info,
614}, {
615 /* Dell F3607gw ver 2 */
616 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
617 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long) &mbm_info,
619}, 585},
620 { }, // END 586 { }, // END
621}; 587};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 291add255246..47634b617107 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -240,7 +240,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
240 goto out; 240 goto out;
241 241
242 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); 242 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
243 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14); 243 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
244 244
245 for (i = 0; i < DM_TIMEOUT; i++) { 245 for (i = 0; i < DM_TIMEOUT; i++) {
246 u8 tmp; 246 u8 tmp;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644
index 000000000000..418825d26f90
--- /dev/null
+++ b/drivers/net/usb/ipheth.c
@@ -0,0 +1,569 @@
1/*
2 * ipheth.c - Apple iPhone USB Ethernet driver
3 *
4 * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of GIAGIO.COM nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 *
41 * Attention: iPhone device must be paired, otherwise it won't respond to our
42 * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
43 *
44 */
45
46#include <linux/kernel.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/module.h>
51#include <linux/netdevice.h>
52#include <linux/etherdevice.h>
53#include <linux/ethtool.h>
54#include <linux/usb.h>
55#include <linux/workqueue.h>
56
57#define USB_VENDOR_APPLE 0x05ac
58#define USB_PRODUCT_IPHONE 0x1290
59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294
61
62#define IPHETH_USBINTF_CLASS 255
63#define IPHETH_USBINTF_SUBCLASS 253
64#define IPHETH_USBINTF_PROTO 1
65
66#define IPHETH_BUF_SIZE 1516
67#define IPHETH_TX_TIMEOUT (5 * HZ)
68
69#define IPHETH_INTFNUM 2
70#define IPHETH_ALT_INTFNUM 1
71
72#define IPHETH_CTRL_ENDP 0x00
73#define IPHETH_CTRL_BUF_SIZE 0x40
74#define IPHETH_CTRL_TIMEOUT (5 * HZ)
75
76#define IPHETH_CMD_GET_MACADDR 0x00
77#define IPHETH_CMD_CARRIER_CHECK 0x45
78
79#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
80#define IPHETH_CARRIER_ON 0x04
81
82static struct usb_device_id ipheth_table[] = {
83 { USB_DEVICE_AND_INTERFACE_INFO(
84 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
85 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
86 IPHETH_USBINTF_PROTO) },
87 { USB_DEVICE_AND_INTERFACE_INFO(
88 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
89 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
90 IPHETH_USBINTF_PROTO) },
91 { USB_DEVICE_AND_INTERFACE_INFO(
92 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
93 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
94 IPHETH_USBINTF_PROTO) },
95 { }
96};
97MODULE_DEVICE_TABLE(usb, ipheth_table);
98
99struct ipheth_device {
100 struct usb_device *udev;
101 struct usb_interface *intf;
102 struct net_device *net;
103 struct sk_buff *tx_skb;
104 struct urb *tx_urb;
105 struct urb *rx_urb;
106 unsigned char *tx_buf;
107 unsigned char *rx_buf;
108 unsigned char *ctrl_buf;
109 u8 bulk_in;
110 u8 bulk_out;
111 struct delayed_work carrier_work;
112};
113
114static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
115
116static int ipheth_alloc_urbs(struct ipheth_device *iphone)
117{
118 struct urb *tx_urb = NULL;
119 struct urb *rx_urb = NULL;
120 u8 *tx_buf = NULL;
121 u8 *rx_buf = NULL;
122
123 tx_urb = usb_alloc_urb(0, GFP_KERNEL);
124 if (tx_urb == NULL)
125 goto error_nomem;
126
127 rx_urb = usb_alloc_urb(0, GFP_KERNEL);
128 if (rx_urb == NULL)
129 goto free_tx_urb;
130
131 tx_buf = usb_buffer_alloc(iphone->udev,
132 IPHETH_BUF_SIZE,
133 GFP_KERNEL,
134 &tx_urb->transfer_dma);
135 if (tx_buf == NULL)
136 goto free_rx_urb;
137
138 rx_buf = usb_buffer_alloc(iphone->udev,
139 IPHETH_BUF_SIZE,
140 GFP_KERNEL,
141 &rx_urb->transfer_dma);
142 if (rx_buf == NULL)
143 goto free_tx_buf;
144
145
146 iphone->tx_urb = tx_urb;
147 iphone->rx_urb = rx_urb;
148 iphone->tx_buf = tx_buf;
149 iphone->rx_buf = rx_buf;
150 return 0;
151
152free_tx_buf:
153 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
154 tx_urb->transfer_dma);
155free_rx_urb:
156 usb_free_urb(rx_urb);
157free_tx_urb:
158 usb_free_urb(tx_urb);
159error_nomem:
160 return -ENOMEM;
161}
162
163static void ipheth_free_urbs(struct ipheth_device *iphone)
164{
165 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
166 iphone->rx_urb->transfer_dma);
167 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
168 iphone->tx_urb->transfer_dma);
169 usb_free_urb(iphone->rx_urb);
170 usb_free_urb(iphone->tx_urb);
171}
172
173static void ipheth_kill_urbs(struct ipheth_device *dev)
174{
175 usb_kill_urb(dev->tx_urb);
176 usb_kill_urb(dev->rx_urb);
177}
178
179static void ipheth_rcvbulk_callback(struct urb *urb)
180{
181 struct ipheth_device *dev;
182 struct sk_buff *skb;
183 int status;
184 char *buf;
185 int len;
186
187 dev = urb->context;
188 if (dev == NULL)
189 return;
190
191 status = urb->status;
192 switch (status) {
193 case -ENOENT:
194 case -ECONNRESET:
195 case -ESHUTDOWN:
196 return;
197 case 0:
198 break;
199 default:
200 err("%s: urb status: %d", __func__, urb->status);
201 return;
202 }
203
204 len = urb->actual_length;
205 buf = urb->transfer_buffer;
206
207 skb = dev_alloc_skb(NET_IP_ALIGN + len);
208 if (!skb) {
209 err("%s: dev_alloc_skb: -ENOMEM", __func__);
210 dev->net->stats.rx_dropped++;
211 return;
212 }
213
214 skb_reserve(skb, NET_IP_ALIGN);
215 memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
216 skb->dev = dev->net;
217 skb->protocol = eth_type_trans(skb, dev->net);
218
219 dev->net->stats.rx_packets++;
220 dev->net->stats.rx_bytes += len;
221
222 netif_rx(skb);
223 ipheth_rx_submit(dev, GFP_ATOMIC);
224}
225
226static void ipheth_sndbulk_callback(struct urb *urb)
227{
228 struct ipheth_device *dev;
229
230 dev = urb->context;
231 if (dev == NULL)
232 return;
233
234 if (urb->status != 0 &&
235 urb->status != -ENOENT &&
236 urb->status != -ECONNRESET &&
237 urb->status != -ESHUTDOWN)
238 err("%s: urb status: %d", __func__, urb->status);
239
240 dev_kfree_skb_irq(dev->tx_skb);
241 netif_wake_queue(dev->net);
242}
243
244static int ipheth_carrier_set(struct ipheth_device *dev)
245{
246 struct usb_device *udev = dev->udev;
247 int retval;
248
249 retval = usb_control_msg(udev,
250 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
251 IPHETH_CMD_CARRIER_CHECK, /* request */
252 0xc0, /* request type */
253 0x00, /* value */
254 0x02, /* index */
255 dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
256 IPHETH_CTRL_TIMEOUT);
257 if (retval < 0) {
258 err("%s: usb_control_msg: %d", __func__, retval);
259 return retval;
260 }
261
262 if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
263 netif_carrier_on(dev->net);
264 else
265 netif_carrier_off(dev->net);
266
267 return 0;
268}
269
270static void ipheth_carrier_check_work(struct work_struct *work)
271{
272 struct ipheth_device *dev = container_of(work, struct ipheth_device,
273 carrier_work.work);
274
275 ipheth_carrier_set(dev);
276 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
277}
278
279static int ipheth_get_macaddr(struct ipheth_device *dev)
280{
281 struct usb_device *udev = dev->udev;
282 struct net_device *net = dev->net;
283 int retval;
284
285 retval = usb_control_msg(udev,
286 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
287 IPHETH_CMD_GET_MACADDR, /* request */
288 0xc0, /* request type */
289 0x00, /* value */
290 0x02, /* index */
291 dev->ctrl_buf,
292 IPHETH_CTRL_BUF_SIZE,
293 IPHETH_CTRL_TIMEOUT);
294 if (retval < 0) {
295 err("%s: usb_control_msg: %d", __func__, retval);
296 } else if (retval < ETH_ALEN) {
297 err("%s: usb_control_msg: short packet: %d bytes",
298 __func__, retval);
299 retval = -EINVAL;
300 } else {
301 memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
302 retval = 0;
303 }
304
305 return retval;
306}
307
308static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
309{
310 struct usb_device *udev = dev->udev;
311 int retval;
312
313 usb_fill_bulk_urb(dev->rx_urb, udev,
314 usb_rcvbulkpipe(udev, dev->bulk_in),
315 dev->rx_buf, IPHETH_BUF_SIZE,
316 ipheth_rcvbulk_callback,
317 dev);
318 dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
319
320 retval = usb_submit_urb(dev->rx_urb, mem_flags);
321 if (retval)
322 err("%s: usb_submit_urb: %d", __func__, retval);
323 return retval;
324}
325
326static int ipheth_open(struct net_device *net)
327{
328 struct ipheth_device *dev = netdev_priv(net);
329 struct usb_device *udev = dev->udev;
330 int retval = 0;
331
332 usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
333
334 retval = ipheth_carrier_set(dev);
335 if (retval)
336 return retval;
337
338 retval = ipheth_rx_submit(dev, GFP_KERNEL);
339 if (retval)
340 return retval;
341
342 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
343 netif_start_queue(net);
344 return retval;
345}
346
347static int ipheth_close(struct net_device *net)
348{
349 struct ipheth_device *dev = netdev_priv(net);
350
351 cancel_delayed_work_sync(&dev->carrier_work);
352 netif_stop_queue(net);
353 return 0;
354}
355
356static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
357{
358 struct ipheth_device *dev = netdev_priv(net);
359 struct usb_device *udev = dev->udev;
360 int retval;
361
362 /* Paranoid */
363 if (skb->len > IPHETH_BUF_SIZE) {
364 WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
365 dev->net->stats.tx_dropped++;
366 dev_kfree_skb_irq(skb);
367 return NETDEV_TX_OK;
368 }
369
370 memcpy(dev->tx_buf, skb->data, skb->len);
371 if (skb->len < IPHETH_BUF_SIZE)
372 memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
373
374 usb_fill_bulk_urb(dev->tx_urb, udev,
375 usb_sndbulkpipe(udev, dev->bulk_out),
376 dev->tx_buf, IPHETH_BUF_SIZE,
377 ipheth_sndbulk_callback,
378 dev);
379 dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
380
381 retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
382 if (retval) {
383 err("%s: usb_submit_urb: %d", __func__, retval);
384 dev->net->stats.tx_errors++;
385 dev_kfree_skb_irq(skb);
386 } else {
387 dev->tx_skb = skb;
388
389 dev->net->stats.tx_packets++;
390 dev->net->stats.tx_bytes += skb->len;
391 netif_stop_queue(net);
392 }
393
394 return NETDEV_TX_OK;
395}
396
397static void ipheth_tx_timeout(struct net_device *net)
398{
399 struct ipheth_device *dev = netdev_priv(net);
400
401 err("%s: TX timeout", __func__);
402 dev->net->stats.tx_errors++;
403 usb_unlink_urb(dev->tx_urb);
404}
405
406static struct net_device_stats *ipheth_stats(struct net_device *net)
407{
408 struct ipheth_device *dev = netdev_priv(net);
409 return &dev->net->stats;
410}
411
412static u32 ipheth_ethtool_op_get_link(struct net_device *net)
413{
414 struct ipheth_device *dev = netdev_priv(net);
415 return netif_carrier_ok(dev->net);
416}
417
418static struct ethtool_ops ops = {
419 .get_link = ipheth_ethtool_op_get_link
420};
421
422static const struct net_device_ops ipheth_netdev_ops = {
423 .ndo_open = &ipheth_open,
424 .ndo_stop = &ipheth_close,
425 .ndo_start_xmit = &ipheth_tx,
426 .ndo_tx_timeout = &ipheth_tx_timeout,
427 .ndo_get_stats = &ipheth_stats,
428};
429
430static struct device_type ipheth_type = {
431 .name = "wwan",
432};
433
434static int ipheth_probe(struct usb_interface *intf,
435 const struct usb_device_id *id)
436{
437 struct usb_device *udev = interface_to_usbdev(intf);
438 struct usb_host_interface *hintf;
439 struct usb_endpoint_descriptor *endp;
440 struct ipheth_device *dev;
441 struct net_device *netdev;
442 int i;
443 int retval;
444
445 netdev = alloc_etherdev(sizeof(struct ipheth_device));
446 if (!netdev)
447 return -ENOMEM;
448
449 netdev->netdev_ops = &ipheth_netdev_ops;
450 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
451 strcpy(netdev->name, "wwan%d");
452
453 dev = netdev_priv(netdev);
454 dev->udev = udev;
455 dev->net = netdev;
456 dev->intf = intf;
457
458 /* Set up endpoints */
459 hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
460 if (hintf == NULL) {
461 retval = -ENODEV;
462 err("Unable to find alternate settings interface");
463 goto err_endpoints;
464 }
465
466 for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
467 endp = &hintf->endpoint[i].desc;
468 if (usb_endpoint_is_bulk_in(endp))
469 dev->bulk_in = endp->bEndpointAddress;
470 else if (usb_endpoint_is_bulk_out(endp))
471 dev->bulk_out = endp->bEndpointAddress;
472 }
473 if (!(dev->bulk_in && dev->bulk_out)) {
474 retval = -ENODEV;
475 err("Unable to find endpoints");
476 goto err_endpoints;
477 }
478
479 dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
480 if (dev->ctrl_buf == NULL) {
481 retval = -ENOMEM;
482 goto err_alloc_ctrl_buf;
483 }
484
485 retval = ipheth_get_macaddr(dev);
486 if (retval)
487 goto err_get_macaddr;
488
489 INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
490
491 retval = ipheth_alloc_urbs(dev);
492 if (retval) {
493 err("error allocating urbs: %d", retval);
494 goto err_alloc_urbs;
495 }
496
497 usb_set_intfdata(intf, dev);
498
499 SET_NETDEV_DEV(netdev, &intf->dev);
500 SET_ETHTOOL_OPS(netdev, &ops);
501 SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
502
503 retval = register_netdev(netdev);
504 if (retval) {
505 err("error registering netdev: %d", retval);
506 retval = -EIO;
507 goto err_register_netdev;
508 }
509
510 dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
511 return 0;
512
513err_register_netdev:
514 ipheth_free_urbs(dev);
515err_alloc_urbs:
516err_get_macaddr:
517err_alloc_ctrl_buf:
518 kfree(dev->ctrl_buf);
519err_endpoints:
520 free_netdev(netdev);
521 return retval;
522}
523
524static void ipheth_disconnect(struct usb_interface *intf)
525{
526 struct ipheth_device *dev;
527
528 dev = usb_get_intfdata(intf);
529 if (dev != NULL) {
530 unregister_netdev(dev->net);
531 ipheth_kill_urbs(dev);
532 ipheth_free_urbs(dev);
533 kfree(dev->ctrl_buf);
534 free_netdev(dev->net);
535 }
536 usb_set_intfdata(intf, NULL);
537 dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
538}
539
540static struct usb_driver ipheth_driver = {
541 .name = "ipheth",
542 .probe = ipheth_probe,
543 .disconnect = ipheth_disconnect,
544 .id_table = ipheth_table,
545};
546
547static int __init ipheth_init(void)
548{
549 int retval;
550
551 retval = usb_register(&ipheth_driver);
552 if (retval) {
553 err("usb_register failed: %d", retval);
554 return retval;
555 }
556 return 0;
557}
558
559static void __exit ipheth_exit(void)
560{
561 usb_deregister(&ipheth_driver);
562}
563
564module_init(ipheth_init);
565module_exit(ipheth_exit);
566
567MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
568MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
569MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea043a7..c4c334d9770f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ 145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ 146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ 147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
148 { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
148 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ 149 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
149 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ 150 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
150 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ 151 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 000000000000..f1942d69a0d5
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1004 @@
1/*
2 * USB-to-WWAN Driver for Sierra Wireless modems
3 *
4 * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
5 * <linux@sierrawireless.com>
6 *
7 * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
8 * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
9 *
10 * IMPORTANT DISCLAIMER: This driver is not commercially supported by
11 * Sierra Wireless. Use at your own risk.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#define DRIVER_VERSION "v.2.0"
29#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
30#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
31static const char driver_name[] = "sierra_net";
32
33/* if defined debug messages enabled */
34/*#define DEBUG*/
35
36#include <linux/module.h>
37#include <linux/etherdevice.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/sched.h>
41#include <linux/timer.h>
42#include <linux/usb.h>
43#include <linux/usb/cdc.h>
44#include <net/ip.h>
45#include <net/udp.h>
46#include <asm/unaligned.h>
47#include <linux/usb/usbnet.h>
48
49#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
50#define SWI_GET_FW_ATTR_MASK 0x08
51
52/* atomic counter partially included in MAC address to make sure 2 devices
53 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
54 */
55static atomic_t iface_counter = ATOMIC_INIT(0);
56
57/*
58 * SYNC Timer Delay definition used to set the expiry time
59 */
60#define SIERRA_NET_SYNCDELAY (2*HZ)
61
62/* Max. MTU supported. The modem buffers are limited to 1500 */
63#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
64
65/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
66 * message reception ... and thus the max. received packet.
67 * (May be the cause for parse_hip returning -EINVAL)
68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70
71/* list of interface numbers - used for constructing interface lists */
72struct sierra_net_iface_info {
73 const u32 infolen; /* number of interface numbers on list */
74 const u8 *ifaceinfo; /* pointer to the array holding the numbers */
75};
76
77struct sierra_net_info_data {
78 u16 rx_urb_size;
79 struct sierra_net_iface_info whitelist;
80};
81
82/* Private data structure */
83struct sierra_net_data {
84
85 u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
86
87 u16 link_up; /* air link up or down */
88 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
89
90 u8 sync_msg[4]; /* SYNC message */
91 u8 shdwn_msg[4]; /* Shutdown message */
92
93 /* Backpointer to the container */
94 struct usbnet *usbnet;
95
96 u8 ifnum; /* interface number */
97
98/* Bit masks, must be a power of 2 */
99#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
100#define SIERRA_NET_TIMER_EXPIRY 0x02
101 unsigned long kevent_flags;
102 struct work_struct sierra_net_kevent;
103 struct timer_list sync_timer; /* For retrying SYNC sequence */
104};
105
106struct param {
107 int is_present;
108 union {
109 void *ptr;
110 u32 dword;
111 u16 word;
112 u8 byte;
113 };
114};
115
116/* HIP message type */
117#define SIERRA_NET_HIP_EXTENDEDID 0x7F
118#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
119#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
120#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
121#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
122
123#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
124#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
125
126/* 3G UMTS Link Sense Indication definitions */
127#define SIERRA_NET_HIP_LSI_UMTSID 0x78
128
129/* Reverse Channel Grant Indication HIP message */
130#define SIERRA_NET_HIP_RCGI 0x64
131
132/* LSI Protocol types */
133#define SIERRA_NET_PROTOCOL_UMTS 0x01
134/* LSI Coverage */
135#define SIERRA_NET_COVERAGE_NONE 0x00
136#define SIERRA_NET_COVERAGE_NOPACKET 0x01
137
138/* LSI Session */
139#define SIERRA_NET_SESSION_IDLE 0x00
140/* LSI Link types */
141#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
142
143struct lsi_umts {
144 u8 protocol;
145 u8 unused1;
146 __be16 length;
147 /* eventually use a union for the rest - assume umts for now */
148 u8 coverage;
149 u8 unused2[41];
150 u8 session_state;
151 u8 unused3[33];
152 u8 link_type;
153 u8 pdp_addr_len; /* NW-supplied PDP address len */
154 u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
155 u8 unused4[23];
156 u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
157 u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
158 u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
159 u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
160 u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
161 u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
162 u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
163 u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
164 u8 unused5[4];
165 u8 gw_addr_len; /* NW-supplied GW address len */
166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
167 u8 reserved[8];
168} __attribute__ ((packed));
169
170#define SIERRA_NET_LSI_COMMON_LEN 4
171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
172#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
173 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
174
175/* Forward definitions */
176static void sierra_sync_timer(unsigned long syncdata);
177static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
178
179/* Our own net device operations structure */
180static const struct net_device_ops sierra_net_device_ops = {
181 .ndo_open = usbnet_open,
182 .ndo_stop = usbnet_stop,
183 .ndo_start_xmit = usbnet_start_xmit,
184 .ndo_tx_timeout = usbnet_tx_timeout,
185 .ndo_change_mtu = sierra_net_change_mtu,
186 .ndo_set_mac_address = eth_mac_addr,
187 .ndo_validate_addr = eth_validate_addr,
188};
189
190/* get private data associated with passed in usbnet device */
191static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
192{
193 return (struct sierra_net_data *)dev->data[0];
194}
195
196/* set private data associated with passed in usbnet device */
197static inline void sierra_net_set_private(struct usbnet *dev,
198 struct sierra_net_data *priv)
199{
200 dev->data[0] = (unsigned long)priv;
201}
202
203/* is packet IPv4 */
204static inline int is_ip(struct sk_buff *skb)
205{
206 return (skb->protocol == cpu_to_be16(ETH_P_IP));
207}
208
209/*
210 * check passed in packet and make sure that:
211 * - it is linear (no scatter/gather)
212 * - it is ethernet (mac_header properly set)
213 */
214static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
215{
216 skb_reset_mac_header(skb); /* ethernet header */
217
218 if (skb_is_nonlinear(skb)) {
219 netdev_err(dev->net, "Non linear buffer-dropping\n");
220 return 0;
221 }
222
223 if (!pskb_may_pull(skb, ETH_HLEN))
224 return 0;
225 skb->protocol = eth_hdr(skb)->h_proto;
226
227 return 1;
228}
229
230static const u8 *save16bit(struct param *p, const u8 *datap)
231{
232 p->is_present = 1;
233 p->word = get_unaligned_be16(datap);
234 return datap + sizeof(p->word);
235}
236
237static const u8 *save8bit(struct param *p, const u8 *datap)
238{
239 p->is_present = 1;
240 p->byte = *datap;
241 return datap + sizeof(p->byte);
242}
243
244/*----------------------------------------------------------------------------*
245 * BEGIN HIP *
246 *----------------------------------------------------------------------------*/
247/* HIP header */
248#define SIERRA_NET_HIP_HDR_LEN 4
249/* Extended HIP header */
250#define SIERRA_NET_HIP_EXT_HDR_LEN 6
251
252struct hip_hdr {
253 int hdrlen;
254 struct param payload_len;
255 struct param msgid;
256 struct param msgspecific;
257 struct param extmsgid;
258};
259
260static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
261{
262 const u8 *curp = buf;
263 int padded;
264
265 if (buflen < SIERRA_NET_HIP_HDR_LEN)
266 return -EPROTO;
267
268 curp = save16bit(&hh->payload_len, curp);
269 curp = save8bit(&hh->msgid, curp);
270 curp = save8bit(&hh->msgspecific, curp);
271
272 padded = hh->msgid.byte & 0x80;
273 hh->msgid.byte &= 0x7F; /* 7 bits */
274
275 hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
276 if (hh->extmsgid.is_present) {
277 if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
278 return -EPROTO;
279
280 hh->payload_len.word &= 0x3FFF; /* 14 bits */
281
282 curp = save16bit(&hh->extmsgid, curp);
283 hh->extmsgid.word &= 0x03FF; /* 10 bits */
284
285 hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
286 } else {
287 hh->payload_len.word &= 0x07FF; /* 11 bits */
288 hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
289 }
290
291 if (padded) {
292 hh->hdrlen++;
293 hh->payload_len.word--;
294 }
295
296 /* if real packet shorter than the claimed length */
297 if (buflen < (hh->hdrlen + hh->payload_len.word))
298 return -EINVAL;
299
300 return 0;
301}
302
303static void build_hip(u8 *buf, const u16 payloadlen,
304 struct sierra_net_data *priv)
305{
306 /* the following doesn't have the full functionality. We
307 * currently build only one kind of header, so it is faster this way
308 */
309 put_unaligned_be16(payloadlen, buf);
310 memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
311}
312/*----------------------------------------------------------------------------*
313 * END HIP *
314 *----------------------------------------------------------------------------*/
315
316static int sierra_net_send_cmd(struct usbnet *dev,
317 u8 *cmd, int cmdlen, const char * cmd_name)
318{
319 struct sierra_net_data *priv = sierra_net_get_private(dev);
320 int status;
321
322 status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
323 USB_CDC_SEND_ENCAPSULATED_COMMAND,
324 USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
325 priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
326
327 if (status != cmdlen && status != -ENODEV)
328 netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
329
330 return status;
331}
332
333static int sierra_net_send_sync(struct usbnet *dev)
334{
335 int status;
336 struct sierra_net_data *priv = sierra_net_get_private(dev);
337
338 dev_dbg(&dev->udev->dev, "%s", __func__);
339
340 status = sierra_net_send_cmd(dev, priv->sync_msg,
341 sizeof(priv->sync_msg), "SYNC");
342
343 return status;
344}
345
346static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
347{
348 dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
349 priv->tx_hdr_template[0] = 0x3F;
350 priv->tx_hdr_template[1] = ctx_ix;
351 *((u16 *)&priv->tx_hdr_template[2]) =
352 cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
353}
354
355static inline int sierra_net_is_valid_addrlen(u8 len)
356{
357 return (len == sizeof(struct in_addr));
358}
359
360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
361{
362 struct lsi_umts *lsi = (struct lsi_umts *)data;
363
364 if (datalen < sizeof(struct lsi_umts)) {
365 netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
366 __func__, datalen,
367 sizeof(struct lsi_umts));
368 return -1;
369 }
370
371 if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
372 netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
373 __func__, be16_to_cpu(lsi->length),
374 (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
375 return -1;
376 }
377
378 /* Validate the protocol - only support UMTS for now */
379 if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
380 netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
381 lsi->protocol);
382 return -1;
383 }
384
385 /* Validate the link type */
386 if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
387 netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
388 lsi->link_type);
389 return -1;
390 }
391
392 /* Validate the coverage */
393 if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
394 || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
395 netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
396 return 0;
397 }
398
399 /* Validate the session state */
400 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
401 netdev_err(dev->net, "Session idle, 0x%02x\n",
402 lsi->session_state);
403 return 0;
404 }
405
406 /* Set link_sense true */
407 return 1;
408}
409
410static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
411 struct hip_hdr *hh)
412{
413 struct sierra_net_data *priv = sierra_net_get_private(dev);
414 int link_up;
415
416 link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
417 hh->payload_len.word);
418 if (link_up < 0) {
419 netdev_err(dev->net, "Invalid LSI\n");
420 return;
421 }
422 if (link_up) {
423 sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
424 priv->link_up = 1;
425 netif_carrier_on(dev->net);
426 } else {
427 priv->link_up = 0;
428 netif_carrier_off(dev->net);
429 }
430}
431
432static void sierra_net_dosync(struct usbnet *dev)
433{
434 int status;
435 struct sierra_net_data *priv = sierra_net_get_private(dev);
436
437 dev_dbg(&dev->udev->dev, "%s", __func__);
438
439 /* tell modem we are ready */
440 status = sierra_net_send_sync(dev);
441 if (status < 0)
442 netdev_err(dev->net,
443 "Send SYNC failed, status %d\n", status);
444 status = sierra_net_send_sync(dev);
445 if (status < 0)
446 netdev_err(dev->net,
447 "Send SYNC failed, status %d\n", status);
448
449 /* Now, start a timer and make sure we get the Restart Indication */
450 priv->sync_timer.function = sierra_sync_timer;
451 priv->sync_timer.data = (unsigned long) dev;
452 priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
453 add_timer(&priv->sync_timer);
454}
455
456static void sierra_net_kevent(struct work_struct *work)
457{
458 struct sierra_net_data *priv =
459 container_of(work, struct sierra_net_data, sierra_net_kevent);
460 struct usbnet *dev = priv->usbnet;
461 int len;
462 int err;
463 u8 *buf;
464 u8 ifnum;
465
466 if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
467 clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
468
469 /* Query the modem for the LSI message */
470 buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
471 if (!buf) {
472 netdev_err(dev->net,
473 "failed to allocate buf for LS msg\n");
474 return;
475 }
476 ifnum = priv->ifnum;
477 len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
478 USB_CDC_GET_ENCAPSULATED_RESPONSE,
479 USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
480 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
481 USB_CTRL_SET_TIMEOUT);
482
483 if (len < 0) {
484 netdev_err(dev->net,
485 "usb_control_msg failed, status %d\n", len);
486 } else {
487 struct hip_hdr hh;
488
489 dev_dbg(&dev->udev->dev, "%s: Received status message,"
490 " %04x bytes", __func__, len);
491
492 err = parse_hip(buf, len, &hh);
493 if (err) {
494 netdev_err(dev->net, "%s: Bad packet,"
495 " parse result %d\n", __func__, err);
496 kfree(buf);
497 return;
498 }
499
500 /* Validate packet length */
501 if (len != hh.hdrlen + hh.payload_len.word) {
502 netdev_err(dev->net, "%s: Bad packet, received"
503 " %d, expected %d\n", __func__, len,
504 hh.hdrlen + hh.payload_len.word);
505 kfree(buf);
506 return;
507 }
508
509 /* Switch on received message types */
510 switch (hh.msgid.byte) {
511 case SIERRA_NET_HIP_LSI_UMTSID:
512 dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
513 hh.msgspecific.byte);
514 sierra_net_handle_lsi(dev, buf, &hh);
515 break;
516 case SIERRA_NET_HIP_RESTART_ID:
517 dev_dbg(&dev->udev->dev, "Restart reported: %d,"
518 " stopping sync timer",
519 hh.msgspecific.byte);
520 /* Got sync resp - stop timer & clear mask */
521 del_timer_sync(&priv->sync_timer);
522 clear_bit(SIERRA_NET_TIMER_EXPIRY,
523 &priv->kevent_flags);
524 break;
525 case SIERRA_NET_HIP_HSYNC_ID:
526 dev_dbg(&dev->udev->dev, "SYNC received");
527 err = sierra_net_send_sync(dev);
528 if (err < 0)
529 netdev_err(dev->net,
530 "Send SYNC failed %d\n", err);
531 break;
532 case SIERRA_NET_HIP_EXTENDEDID:
533 netdev_err(dev->net, "Unrecognized HIP msg, "
534 "extmsgid 0x%04x\n", hh.extmsgid.word);
535 break;
536 case SIERRA_NET_HIP_RCGI:
537 /* Ignored */
538 break;
539 default:
540 netdev_err(dev->net, "Unrecognized HIP msg, "
541 "msgid 0x%02x\n", hh.msgid.byte);
542 break;
543 }
544 }
545 kfree(buf);
546 }
547 /* The sync timer bit might be set */
548 if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
549 clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
550 dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
551 sierra_net_dosync(priv->usbnet);
552 }
553
554 if (priv->kevent_flags)
555 dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
556 "kevent_flags = 0x%lx", priv->kevent_flags);
557}
558
559static void sierra_net_defer_kevent(struct usbnet *dev, int work)
560{
561 struct sierra_net_data *priv = sierra_net_get_private(dev);
562
563 set_bit(work, &priv->kevent_flags);
564 schedule_work(&priv->sierra_net_kevent);
565}
566
567/*
568 * Sync Retransmit Timer Handler. On expiry, kick the work queue
569 */
570void sierra_sync_timer(unsigned long syncdata)
571{
572 struct usbnet *dev = (struct usbnet *)syncdata;
573
574 dev_dbg(&dev->udev->dev, "%s", __func__);
575 /* Kick the tasklet */
576 sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
577}
578
579static void sierra_net_status(struct usbnet *dev, struct urb *urb)
580{
581 struct usb_cdc_notification *event;
582
583 dev_dbg(&dev->udev->dev, "%s", __func__);
584
585 if (urb->actual_length < sizeof *event)
586 return;
587
588 /* Add cases to handle other standard notifications. */
589 event = urb->transfer_buffer;
590 switch (event->bNotificationType) {
591 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
592 case USB_CDC_NOTIFY_SPEED_CHANGE:
593 /* USB 305 sends those */
594 break;
595 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
596 sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
597 break;
598 default:
599 netdev_err(dev->net, ": unexpected notification %02x!\n",
600 event->bNotificationType);
601 break;
602 }
603}
604
605static void sierra_net_get_drvinfo(struct net_device *net,
606 struct ethtool_drvinfo *info)
607{
608 /* Inherit standard device info */
609 usbnet_get_drvinfo(net, info);
610 strncpy(info->driver, driver_name, sizeof info->driver);
611 strncpy(info->version, DRIVER_VERSION, sizeof info->version);
612}
613
614static u32 sierra_net_get_link(struct net_device *net)
615{
616 struct usbnet *dev = netdev_priv(net);
617 /* Report link is down whenever the interface is down */
618 return sierra_net_get_private(dev)->link_up && netif_running(net);
619}
620
621static struct ethtool_ops sierra_net_ethtool_ops = {
622 .get_drvinfo = sierra_net_get_drvinfo,
623 .get_link = sierra_net_get_link,
624 .get_msglevel = usbnet_get_msglevel,
625 .set_msglevel = usbnet_set_msglevel,
626 .get_settings = usbnet_get_settings,
627 .set_settings = usbnet_set_settings,
628 .nway_reset = usbnet_nway_reset,
629};
630
631/* MTU can not be more than 1500 bytes, enforce it. */
632static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
633{
634 if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
635 return -EINVAL;
636
637 return usbnet_change_mtu(net, new_mtu);
638}
639
640static int is_whitelisted(const u8 ifnum,
641 const struct sierra_net_iface_info *whitelist)
642{
643 if (whitelist) {
644 const u8 *list = whitelist->ifaceinfo;
645 int i;
646
647 for (i = 0; i < whitelist->infolen; i++) {
648 if (list[i] == ifnum)
649 return 1;
650 }
651 }
652 return 0;
653}
654
655static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
656{
657 int result = 0;
658 u16 *attrdata;
659
660 attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
661 if (!attrdata)
662 return -ENOMEM;
663
664 result = usb_control_msg(
665 dev->udev,
666 usb_rcvctrlpipe(dev->udev, 0),
667 /* _u8 vendor specific request */
668 SWI_USB_REQUEST_GET_FW_ATTR,
669 USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
670 0x0000, /* __u16 value not used */
671 0x0000, /* __u16 index not used */
672 attrdata, /* char *data */
673 sizeof(*attrdata), /* __u16 size */
674 USB_CTRL_SET_TIMEOUT); /* int timeout */
675
676 if (result < 0) {
677 kfree(attrdata);
678 return -EIO;
679 }
680
681 *datap = *attrdata;
682
683 kfree(attrdata);
684 return result;
685}
686
687/*
688 * collects the bulk endpoints, the status endpoint.
689 */
690static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
691{
692 u8 ifacenum;
693 u8 numendpoints;
694 u16 fwattr = 0;
695 int status;
696 struct ethhdr *eth;
697 struct sierra_net_data *priv;
698 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
699 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
700 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
701 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
702
703 struct sierra_net_info_data *data =
704 (struct sierra_net_info_data *)dev->driver_info->data;
705
706 dev_dbg(&dev->udev->dev, "%s", __func__);
707
708 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
709 /* We only accept certain interfaces */
710 if (!is_whitelisted(ifacenum, &data->whitelist)) {
711 dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
712 return -ENODEV;
713 }
714 numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
715 /* We have three endpoints, bulk in and out, and a status */
716 if (numendpoints != 3) {
717 dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
718 numendpoints);
719 return -ENODEV;
720 }
721 /* Status endpoint set in usbnet_get_endpoints() */
722 dev->status = NULL;
723 status = usbnet_get_endpoints(dev, intf);
724 if (status < 0) {
725 dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
726 status);
727 return -ENODEV;
728 }
729 /* Initialize sierra private data */
730 priv = kzalloc(sizeof *priv, GFP_KERNEL);
731 if (!priv) {
732 dev_err(&dev->udev->dev, "No memory");
733 return -ENOMEM;
734 }
735
736 priv->usbnet = dev;
737 priv->ifnum = ifacenum;
738 dev->net->netdev_ops = &sierra_net_device_ops;
739
740 /* change MAC addr to include, ifacenum, and to be unique */
741 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
742 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
743
744 /* we will have to manufacture ethernet headers, prepare template */
745 eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
746 memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
747 eth->h_proto = cpu_to_be16(ETH_P_IP);
748
749 /* prepare shutdown message template */
750 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
751 /* set context index initially to 0 - prepares tx hdr template */
752 sierra_net_set_ctx_index(priv, 0);
753
754 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
755 dev->rx_urb_size = data->rx_urb_size;
756 if (dev->udev->speed != USB_SPEED_HIGH)
757 dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
758
759 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
760 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
761
762 /* Set up the netdev */
763 dev->net->flags |= IFF_NOARP;
764 dev->net->ethtool_ops = &sierra_net_ethtool_ops;
765 netif_carrier_off(dev->net);
766
767 sierra_net_set_private(dev, priv);
768
769 priv->kevent_flags = 0;
770
771 /* Use the shared workqueue */
772 INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
773
774 /* Only need to do this once */
775 init_timer(&priv->sync_timer);
776
777 /* verify fw attributes */
778 status = sierra_net_get_fw_attr(dev, &fwattr);
779 dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
780
781 /* test whether firmware supports DHCP */
782 if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
783 /* found incompatible firmware version */
784 dev_err(&dev->udev->dev, "Incompatible driver and firmware"
785 " versions\n");
786 kfree(priv);
787 return -ENODEV;
788 }
789 /* prepare sync message from template */
790 memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
791
792 /* initiate the sync sequence */
793 sierra_net_dosync(dev);
794
795 return 0;
796}
797
798static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
799{
800 int status;
801 struct sierra_net_data *priv = sierra_net_get_private(dev);
802
803 dev_dbg(&dev->udev->dev, "%s", __func__);
804
805 /* Kill the timer then flush the work queue */
806 del_timer_sync(&priv->sync_timer);
807
808 flush_scheduled_work();
809
810 /* tell modem we are going away */
811 status = sierra_net_send_cmd(dev, priv->shdwn_msg,
812 sizeof(priv->shdwn_msg), "Shutdown");
813 if (status < 0)
814 netdev_err(dev->net,
815 "usb_control_msg failed, status %d\n", status);
816
817 sierra_net_set_private(dev, NULL);
818
819 kfree(priv);
820}
821
822static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
823 struct sk_buff *skb, int len)
824{
825 struct sk_buff *new_skb;
826
827 /* clone skb */
828 new_skb = skb_clone(skb, GFP_ATOMIC);
829
830 /* remove len bytes from original */
831 skb_pull(skb, len);
832
833 /* trim next packet to it's length */
834 if (new_skb) {
835 skb_trim(new_skb, len);
836 } else {
837 if (netif_msg_rx_err(dev))
838 netdev_err(dev->net, "failed to get skb\n");
839 dev->net->stats.rx_dropped++;
840 }
841
842 return new_skb;
843}
844
845/* ---------------------------- Receive data path ----------------------*/
846static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
847{
848 int err;
849 struct hip_hdr hh;
850 struct sk_buff *new_skb;
851
852 dev_dbg(&dev->udev->dev, "%s", __func__);
853
854 /* could contain multiple packets */
855 while (likely(skb->len)) {
856 err = parse_hip(skb->data, skb->len, &hh);
857 if (err) {
858 if (netif_msg_rx_err(dev))
859 netdev_err(dev->net, "Invalid HIP header %d\n",
860 err);
861 /* dev->net->stats.rx_errors incremented by caller */
862 dev->net->stats.rx_length_errors++;
863 return 0;
864 }
865
866 /* Validate Extended HIP header */
867 if (!hh.extmsgid.is_present
868 || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
869 if (netif_msg_rx_err(dev))
870 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
871
872 dev->net->stats.rx_frame_errors++;
873 /* dev->net->stats.rx_errors incremented by caller */;
874 return 0;
875 }
876
877 skb_pull(skb, hh.hdrlen);
878
879 /* We are going to accept this packet, prepare it */
880 memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
881 ETH_HLEN);
882
883 /* Last packet in batch handled by usbnet */
884 if (hh.payload_len.word == skb->len)
885 return 1;
886
887 new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
888 if (new_skb)
889 usbnet_skb_return(dev, new_skb);
890
891 } /* while */
892
893 return 0;
894}
895
896/* ---------------------------- Transmit data path ----------------------*/
897struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
898 gfp_t flags)
899{
900 struct sierra_net_data *priv = sierra_net_get_private(dev);
901 u16 len;
902 bool need_tail;
903
904 dev_dbg(&dev->udev->dev, "%s", __func__);
905 if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
906 /* enough head room as is? */
907 if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
908 /* Save the Eth/IP length and set up HIP hdr */
909 len = skb->len;
910 skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
911 /* Handle ZLP issue */
912 need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
913 % dev->maxpacket == 0);
914 if (need_tail) {
915 if (unlikely(skb_tailroom(skb) == 0)) {
916 netdev_err(dev->net, "tx_fixup:"
917 "no room for packet\n");
918 dev_kfree_skb_any(skb);
919 return NULL;
920 } else {
921 skb->data[skb->len] = 0;
922 __skb_put(skb, 1);
923 len = len + 1;
924 }
925 }
926 build_hip(skb->data, len, priv);
927 return skb;
928 } else {
929 /*
930 * compensate in the future if necessary
931 */
932 netdev_err(dev->net, "tx_fixup: no room for HIP\n");
933 } /* headroom */
934 }
935
936 if (!priv->link_up)
937 dev->net->stats.tx_carrier_errors++;
938
939 /* tx_dropped incremented by usbnet */
940
941 /* filter the packet out, release it */
942 dev_kfree_skb_any(skb);
943 return NULL;
944}
945
946static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
947static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
948 .rx_urb_size = 8 * 1024,
949 .whitelist = {
950 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
951 .ifaceinfo = sierra_net_ifnum_list
952 }
953};
954
955static const struct driver_info sierra_net_info_68A3 = {
956 .description = "Sierra Wireless USB-to-WWAN Modem",
957 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
958 .bind = sierra_net_bind,
959 .unbind = sierra_net_unbind,
960 .status = sierra_net_status,
961 .rx_fixup = sierra_net_rx_fixup,
962 .tx_fixup = sierra_net_tx_fixup,
963 .data = (unsigned long)&sierra_net_info_data_68A3,
964};
965
966static const struct usb_device_id products[] = {
967 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
968 .driver_info = (unsigned long) &sierra_net_info_68A3},
969
970 {}, /* last item */
971};
972MODULE_DEVICE_TABLE(usb, products);
973
974/* We are based on usbnet, so let it handle the USB driver specifics */
975static struct usb_driver sierra_net_driver = {
976 .name = "sierra_net",
977 .id_table = products,
978 .probe = usbnet_probe,
979 .disconnect = usbnet_disconnect,
980 .suspend = usbnet_suspend,
981 .resume = usbnet_resume,
982 .no_dynamic_id = 1,
983};
984
985static int __init sierra_net_init(void)
986{
987 BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
988 < sizeof(struct cdc_state));
989
990 return usb_register(&sierra_net_driver);
991}
992
993static void __exit sierra_net_exit(void)
994{
995 usb_deregister(&sierra_net_driver);
996}
997
998module_exit(sierra_net_exit);
999module_init(sierra_net_init);
1000
1001MODULE_AUTHOR(DRIVER_AUTHOR);
1002MODULE_DESCRIPTION(DRIVER_DESC);
1003MODULE_VERSION(DRIVER_VERSION);
1004MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95c5bd7..cf9e15fd8d91 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
634 } 634 }
635 } else { /* chan->protocol == ETH_P_X25 */ 635 } else { /* chan->protocol == ETH_P_X25 */
636 switch (skb->data[0]) { 636 switch (skb->data[0]) {
637 case 0: break; 637 case X25_IFACE_DATA:
638 case 1: /* Connect request */ 638 break;
639 case X25_IFACE_CONNECT:
639 cycx_x25_chan_connect(dev); 640 cycx_x25_chan_connect(dev);
640 goto free_packet; 641 goto free_packet;
641 case 2: /* Disconnect request */ 642 case X25_IFACE_DISCONNECT:
642 cycx_x25_chan_disconnect(dev); 643 cycx_x25_chan_disconnect(dev);
643 goto free_packet; 644 goto free_packet;
644 default: 645 default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1406 reset_timer(dev); 1407 reset_timer(dev);
1407 1408
1408 if (chan->protocol == ETH_P_X25) 1409 if (chan->protocol == ETH_P_X25)
1409 cycx_x25_chan_send_event(dev, 1); 1410 cycx_x25_chan_send_event(dev,
1411 X25_IFACE_CONNECT);
1410 1412
1411 break; 1413 break;
1412 case WAN_CONNECTING: 1414 case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1424 } 1426 }
1425 1427
1426 if (chan->protocol == ETH_P_X25) 1428 if (chan->protocol == ETH_P_X25)
1427 cycx_x25_chan_send_event(dev, 2); 1429 cycx_x25_chan_send_event(dev,
1430 X25_IFACE_DISCONNECT);
1428 1431
1429 netif_wake_queue(dev); 1432 netif_wake_queue(dev);
1430 break; 1433 break;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b9b9d6b01c0b..941f053e650e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); 628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
629} 629}
630 630
631static void ppp_close(struct net_device *dev)
632{
633 ppp_tx_flush();
634}
635
631static struct hdlc_proto proto = { 636static struct hdlc_proto proto = {
632 .start = ppp_start, 637 .start = ppp_start,
633 .stop = ppp_stop, 638 .stop = ppp_stop,
639 .close = ppp_close,
634 .type_trans = ppp_type_trans, 640 .type_trans = ppp_type_trans,
635 .ioctl = ppp_ioctl, 641 .ioctl = ppp_ioctl,
636 .netif_rx = ppp_rx, 642 .netif_rx = ppp_rx,
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb79f7cc..70527e5a54a2 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
49 49
50static void x25_connected(struct net_device *dev, int reason) 50static void x25_connected(struct net_device *dev, int reason)
51{ 51{
52 x25_connect_disconnect(dev, reason, 1); 52 x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
53} 53}
54 54
55 55
56 56
57static void x25_disconnected(struct net_device *dev, int reason) 57static void x25_disconnected(struct net_device *dev, int reason)
58{ 58{
59 x25_connect_disconnect(dev, reason, 2); 59 x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
60} 60}
61 61
62 62
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
71 return NET_RX_DROP; 71 return NET_RX_DROP;
72 72
73 ptr = skb->data; 73 ptr = skb->data;
74 *ptr = 0; 74 *ptr = X25_IFACE_DATA;
75 75
76 skb->protocol = x25_type_trans(skb, dev); 76 skb->protocol = x25_type_trans(skb, dev);
77 return netif_rx(skb); 77 return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
94 94
95 /* X.25 to LAPB */ 95 /* X.25 to LAPB */
96 switch (skb->data[0]) { 96 switch (skb->data[0]) {
97 case 0: /* Data to be transmitted */ 97 case X25_IFACE_DATA: /* Data to be transmitted */
98 skb_pull(skb, 1); 98 skb_pull(skb, 1);
99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK) 99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
100 dev_kfree_skb(skb); 100 dev_kfree_skb(skb);
101 return NETDEV_TX_OK; 101 return NETDEV_TX_OK;
102 102
103 case 1: 103 case X25_IFACE_CONNECT:
104 if ((result = lapb_connect_request(dev))!= LAPB_OK) { 104 if ((result = lapb_connect_request(dev))!= LAPB_OK) {
105 if (result == LAPB_CONNECTED) 105 if (result == LAPB_CONNECTED)
106 /* Send connect confirm. msg to level 3 */ 106 /* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
112 } 112 }
113 break; 113 break;
114 114
115 case 2: 115 case X25_IFACE_DISCONNECT:
116 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) { 116 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
117 if (result == LAPB_NOTCONNECTED) 117 if (result == LAPB_NOTCONNECTED)
118 /* Send disconnect confirm. msg to level 3 */ 118 /* Send disconnect confirm. msg to level 3 */
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99903d7..4d4dc38c7290 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
139 return NET_RX_DROP; 139 return NET_RX_DROP;
140 140
141 ptr = skb->data; 141 ptr = skb->data;
142 *ptr = 0x00; 142 *ptr = X25_IFACE_DATA;
143 143
144 skb->protocol = x25_type_trans(skb, dev); 144 skb->protocol = x25_type_trans(skb, dev);
145 return netif_rx(skb); 145 return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
161 goto drop; 161 goto drop;
162 162
163 switch (skb->data[0]) { 163 switch (skb->data[0]) {
164 case 0x00: 164 case X25_IFACE_DATA:
165 break; 165 break;
166 case 0x01: 166 case X25_IFACE_CONNECT:
167 if ((err = lapb_connect_request(dev)) != LAPB_OK) 167 if ((err = lapb_connect_request(dev)) != LAPB_OK)
168 printk(KERN_ERR "lapbeth: lapb_connect_request " 168 printk(KERN_ERR "lapbeth: lapb_connect_request "
169 "error: %d\n", err); 169 "error: %d\n", err);
170 goto drop; 170 goto drop;
171 case 0x02: 171 case X25_IFACE_DISCONNECT:
172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK) 172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
173 printk(KERN_ERR "lapbeth: lapb_disconnect_request " 173 printk(KERN_ERR "lapbeth: lapb_disconnect_request "
174 "err: %d\n", err); 174 "err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
225 } 225 }
226 226
227 ptr = skb_put(skb, 1); 227 ptr = skb_put(skb, 1);
228 *ptr = 0x01; 228 *ptr = X25_IFACE_CONNECT;
229 229
230 skb->protocol = x25_type_trans(skb, dev); 230 skb->protocol = x25_type_trans(skb, dev);
231 netif_rx(skb); 231 netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
242 } 242 }
243 243
244 ptr = skb_put(skb, 1); 244 ptr = skb_put(skb, 1);
245 *ptr = 0x02; 245 *ptr = X25_IFACE_DISCONNECT;
246 246
247 skb->protocol = x25_type_trans(skb, dev); 247 skb->protocol = x25_type_trans(skb, dev);
248 netif_rx(skb); 248 netif_rx(skb);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c5834a0b..166e77dfffda 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/skbuff.h> 30#include <linux/skbuff.h>
31#include <linux/if_arp.h> 31#include <linux/if_arp.h>
32#include <linux/x25.h>
33#include <linux/lapb.h> 32#include <linux/lapb.h>
34#include <linux/init.h> 33#include <linux/init.h>
35#include <linux/rtnetlink.h> 34#include <linux/rtnetlink.h>
36#include <linux/compat.h> 35#include <linux/compat.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
37#include <net/x25device.h>
38#include "x25_asy.h" 38#include "x25_asy.h"
39 39
40#include <net/x25device.h> 40#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
315 } 315 }
316 316
317 switch (skb->data[0]) { 317 switch (skb->data[0]) {
318 case 0x00: 318 case X25_IFACE_DATA:
319 break; 319 break;
320 case 0x01: /* Connection request .. do nothing */ 320 case X25_IFACE_CONNECT: /* Connection request .. do nothing */
321 err = lapb_connect_request(dev); 321 err = lapb_connect_request(dev);
322 if (err != LAPB_OK) 322 if (err != LAPB_OK)
323 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err); 323 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
324 kfree_skb(skb); 324 kfree_skb(skb);
325 return NETDEV_TX_OK; 325 return NETDEV_TX_OK;
326 case 0x02: /* Disconnect request .. do nothing - hang up ?? */ 326 case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
327 err = lapb_disconnect_request(dev); 327 err = lapb_disconnect_request(dev);
328 if (err != LAPB_OK) 328 if (err != LAPB_OK)
329 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err); 329 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
411 } 411 }
412 412
413 ptr = skb_put(skb, 1); 413 ptr = skb_put(skb, 1);
414 *ptr = 0x01; 414 *ptr = X25_IFACE_CONNECT;
415 415
416 skb->protocol = x25_type_trans(skb, sl->dev); 416 skb->protocol = x25_type_trans(skb, sl->dev);
417 netif_rx(skb); 417 netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
430 } 430 }
431 431
432 ptr = skb_put(skb, 1); 432 ptr = skb_put(skb, 1);
433 *ptr = 0x02; 433 *ptr = X25_IFACE_DISCONNECT;
434 434
435 skb->protocol = x25_type_trans(skb, sl->dev); 435 skb->protocol = x25_type_trans(skb, sl->dev);
436 netif_rx(skb); 436 netif_rx(skb);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 588943660755..2fbe9b4506c0 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -38,6 +38,12 @@ config LIBERTAS_THINFIRM
38 ---help--- 38 ---help---
39 A library for Marvell Libertas 8xxx devices using thinfirm. 39 A library for Marvell Libertas 8xxx devices using thinfirm.
40 40
41config LIBERTAS_THINFIRM_DEBUG
42 bool "Enable full debugging output in the Libertas thin firmware module."
43 depends on LIBERTAS_THINFIRM
44 ---help---
45 Debugging support.
46
41config LIBERTAS_THINFIRM_USB 47config LIBERTAS_THINFIRM_USB
42 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware" 48 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
43 depends on LIBERTAS_THINFIRM && USB 49 depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +216,7 @@ config USB_NET_RNDIS_WLAN
210 216
211 If you choose to build a module, it'll be called rndis_wlan. 217 If you choose to build a module, it'll be called rndis_wlan.
212 218
213config RTL8180 219source "drivers/net/wireless/rtl818x/Kconfig"
214 tristate "Realtek 8180/8185 PCI support"
215 depends on MAC80211 && PCI && EXPERIMENTAL
216 select EEPROM_93CX6
217 ---help---
218 This is a driver for RTL8180 and RTL8185 based cards.
219 These are PCI based chips found in cards such as:
220
221 (RTL8185 802.11g)
222 A-Link WL54PC
223
224 (RTL8180 802.11b)
225 Belkin F5D6020 v3
226 Belkin F5D6020 v3
227 Dlink DWL-610
228 Dlink DWL-510
229 Netgear MA521
230 Level-One WPC-0101
231 Acer Aspire 1357 LMi
232 VCTnet PC-11B1
233 Ovislink AirLive WL-1120PCM
234 Mentor WL-PCI
235 Linksys WPC11 v4
236 TrendNET TEW-288PI
237 D-Link DWL-520 Rev D
238 Repotec RP-WP7126
239 TP-Link TL-WN250/251
240 Zonet ZEW1000
241 Longshine LCS-8031-R
242 HomeLine HLW-PCC200
243 GigaFast WF721-AEX
244 Planet WL-3553
245 Encore ENLWI-PCI1-NT
246 TrendNET TEW-266PC
247 Gigabyte GN-WLMR101
248 Siemens-fujitsu Amilo D1840W
249 Edimax EW-7126
250 PheeNet WL-11PCIR
251 Tonze PC-2100T
252 Planet WL-8303
253 Dlink DWL-650 v M1
254 Edimax EW-7106
255 Q-Tec 770WC
256 Topcom Skyr@cer 4011b
257 Roper FreeLan 802.11b (edition 2004)
258 Wistron Neweb Corp CB-200B
259 Pentagram HorNET
260 QTec 775WC
261 TwinMOS Booming B Series
262 Micronet SP906BB
263 Sweex LC700010
264 Surecom EP-9428
265 Safecom SWLCR-1100
266
267 Thanks to Realtek for their support!
268
269config RTL8187
270 tristate "Realtek 8187 and 8187B USB support"
271 depends on MAC80211 && USB
272 select EEPROM_93CX6
273 ---help---
274 This is a driver for RTL8187 and RTL8187B based cards.
275 These are USB based chips found in devices such as:
276
277 Netgear WG111v2
278 Level 1 WNC-0301USB
279 Micronet SP907GK V5
280 Encore ENUWI-G2
281 Trendnet TEW-424UB
282 ASUS P5B Deluxe/P5K Premium motherboards
283 Toshiba Satellite Pro series of laptops
284 Asus Wireless Link
285 Linksys WUSB54GC-EU v2
286 (v1 = rt73usb; v3 is rt2070-based,
287 use staging/rt3070 or try rt2800usb)
288
289 Thanks to Realtek for their support!
290
291# If possible, automatically enable LEDs for RTL8187.
292
293config RTL8187_LEDS
294 bool
295 depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
296 default y
297 220
298config ADM8211 221config ADM8211
299 tristate "ADMtek ADM8211 support" 222 tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb419936dff..7a626d4e100f 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1889,6 +1889,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1889} 1889}
1890 1890
1891static int at76_hw_scan(struct ieee80211_hw *hw, 1891static int at76_hw_scan(struct ieee80211_hw *hw,
1892 struct ieee80211_vif *vif,
1892 struct cfg80211_scan_request *req) 1893 struct cfg80211_scan_request *req)
1893{ 1894{
1894 struct at76_priv *priv = hw->priv; 1895 struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 0312cee39570..2e9b330f6413 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -927,7 +927,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
927 927
928 /* TODO: we could do something with phy_errors */ 928 /* TODO: we could do something with phy_errors */
929 status->signal = ar->noise[0] + phy->rssi_combined; 929 status->signal = ar->noise[0] + phy->rssi_combined;
930 status->noise = ar->noise[0];
931} 930}
932 931
933static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len) 932static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -2548,8 +2547,7 @@ void *ar9170_alloc(size_t priv_size)
2548 BIT(NL80211_IFTYPE_ADHOC); 2547 BIT(NL80211_IFTYPE_ADHOC);
2549 ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 2548 ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
2550 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2549 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2551 IEEE80211_HW_SIGNAL_DBM | 2550 IEEE80211_HW_SIGNAL_DBM;
2552 IEEE80211_HW_NOISE_DBM;
2553 2551
2554 if (modparam_ht) { 2552 if (modparam_ht) {
2555 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 2553 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 1fbf6b1f9a7e..d32f2828b098 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -71,9 +71,21 @@ struct ath_regulatory {
71 struct reg_dmn_pair_mapping *regpair; 71 struct reg_dmn_pair_mapping *regpair;
72}; 72};
73 73
74/**
75 * struct ath_ops - Register read/write operations
76 *
77 * @read: Register read
78 * @write: Register write
79 * @enable_write_buffer: Enable multiple register writes
80 * @disable_write_buffer: Disable multiple register writes
81 * @write_flush: Flush buffered register writes
82 */
74struct ath_ops { 83struct ath_ops {
75 unsigned int (*read)(void *, u32 reg_offset); 84 unsigned int (*read)(void *, u32 reg_offset);
76 void (*write)(void *, u32 val, u32 reg_offset); 85 void (*write)(void *, u32 val, u32 reg_offset);
86 void (*enable_write_buffer)(void *);
87 void (*disable_write_buffer)(void *);
88 void (*write_flush) (void *);
77}; 89};
78 90
79struct ath_common; 91struct ath_common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 93005f1d326d..7f5953fac434 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
242 struct ieee80211_key_conf *key); 242 struct ieee80211_key_conf *key);
243static int ath5k_get_stats(struct ieee80211_hw *hw, 243static int ath5k_get_stats(struct ieee80211_hw *hw,
244 struct ieee80211_low_level_stats *stats); 244 struct ieee80211_low_level_stats *stats);
245static int ath5k_get_survey(struct ieee80211_hw *hw,
246 int idx, struct survey_info *survey);
245static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 247static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
246static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); 248static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
247static void ath5k_reset_tsf(struct ieee80211_hw *hw); 249static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
267 .configure_filter = ath5k_configure_filter, 269 .configure_filter = ath5k_configure_filter,
268 .set_key = ath5k_set_key, 270 .set_key = ath5k_set_key,
269 .get_stats = ath5k_get_stats, 271 .get_stats = ath5k_get_stats,
272 .get_survey = ath5k_get_survey,
270 .conf_tx = NULL, 273 .conf_tx = NULL,
271 .get_tsf = ath5k_get_tsf, 274 .get_tsf = ath5k_get_tsf,
272 .set_tsf = ath5k_set_tsf, 275 .set_tsf = ath5k_set_tsf,
@@ -545,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
545 SET_IEEE80211_DEV(hw, &pdev->dev); 548 SET_IEEE80211_DEV(hw, &pdev->dev);
546 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 549 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
547 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 550 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
548 IEEE80211_HW_SIGNAL_DBM | 551 IEEE80211_HW_SIGNAL_DBM;
549 IEEE80211_HW_NOISE_DBM;
550 552
551 hw->wiphy->interface_modes = 553 hw->wiphy->interface_modes =
552 BIT(NL80211_IFTYPE_AP) | 554 BIT(NL80211_IFTYPE_AP) |
@@ -2027,8 +2029,7 @@ accept:
2027 rxs->freq = sc->curchan->center_freq; 2029 rxs->freq = sc->curchan->center_freq;
2028 rxs->band = sc->curband->band; 2030 rxs->band = sc->curband->band;
2029 2031
2030 rxs->noise = sc->ah->ah_noise_floor; 2032 rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
2031 rxs->signal = rxs->noise + rs.rs_rssi;
2032 2033
2033 rxs->antenna = rs.rs_antenna; 2034 rxs->antenna = rs.rs_antenna;
2034 2035
@@ -3292,6 +3293,22 @@ ath5k_get_stats(struct ieee80211_hw *hw,
3292 return 0; 3293 return 0;
3293} 3294}
3294 3295
3296static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3297 struct survey_info *survey)
3298{
3299 struct ath5k_softc *sc = hw->priv;
3300 struct ieee80211_conf *conf = &hw->conf;
3301
3302 if (idx != 0)
3303 return -ENOENT;
3304
3305 survey->channel = conf->channel;
3306 survey->filled = SURVEY_INFO_NOISE_DBM;
3307 survey->noise = sc->ah->ah_noise_floor;
3308
3309 return 0;
3310}
3311
3295static u64 3312static u64
3296ath5k_get_tsf(struct ieee80211_hw *hw) 3313ath5k_get_tsf(struct ieee80211_hw *hw)
3297{ 3314{
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 174412fc81f8..5212e275f1c7 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -496,6 +496,8 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
496* Beacon control * 496* Beacon control *
497\****************/ 497\****************/
498 498
499#define ATH5K_MAX_TSF_READ 10
500
499/** 501/**
500 * ath5k_hw_get_tsf64 - Get the full 64bit TSF 502 * ath5k_hw_get_tsf64 - Get the full 64bit TSF
501 * 503 *
@@ -505,10 +507,35 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
505 */ 507 */
506u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) 508u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
507{ 509{
508 u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32); 510 u32 tsf_lower, tsf_upper1, tsf_upper2;
511 int i;
512
513 /*
514 * While reading TSF upper and then lower part, the clock is still
515 * counting (or jumping in case of IBSS merge) so we might get
516 * inconsistent values. To avoid this, we read the upper part again
517 * and check it has not been changed. We make the hypothesis that a
518 * maximum of 3 changes can happens in a row (we use 10 as a safe
519 * value).
520 *
521 * Impact on performance is pretty small, since in most cases, only
522 * 3 register reads are needed.
523 */
524
525 tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
526 for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
527 tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
528 tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
529 if (tsf_upper2 == tsf_upper1)
530 break;
531 tsf_upper1 = tsf_upper2;
532 }
533
534 WARN_ON( i == ATH5K_MAX_TSF_READ );
535
509 ATH5K_TRACE(ah->ah_sc); 536 ATH5K_TRACE(ah->ah_sc);
510 537
511 return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32); 538 return (((u64)tsf_upper1 << 32) | tsf_lower);
512} 539}
513 540
514/** 541/**
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 97133beda269..dd112be218ab 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,16 +13,26 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
13 13
14obj-$(CONFIG_ATH9K) += ath9k.o 14obj-$(CONFIG_ATH9K) += ath9k.o
15 15
16ath9k_hw-y:= hw.o \ 16ath9k_hw-y:= \
17 ar9002_hw.o \
18 ar9003_hw.o \
19 hw.o \
20 ar9003_phy.o \
21 ar9002_phy.o \
22 ar5008_phy.o \
23 ar9002_calib.o \
24 ar9003_calib.o \
25 calib.o \
17 eeprom.o \ 26 eeprom.o \
18 eeprom_def.o \ 27 eeprom_def.o \
19 eeprom_4k.o \ 28 eeprom_4k.o \
20 eeprom_9287.o \ 29 eeprom_9287.o \
21 calib.o \
22 ani.o \ 30 ani.o \
23 phy.o \
24 btcoex.o \ 31 btcoex.o \
25 mac.o \ 32 mac.o \
33 ar9002_mac.o \
34 ar9003_mac.o \
35 ar9003_eeprom.o
26 36
27obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o 37obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
28 38
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64c2bfb..ba8b20f01594 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h"
18 19
19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 20static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
20 struct ath9k_channel *chan) 21 struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
37 return 0; 38 return 0;
38} 39}
39 40
40static bool ath9k_hw_ani_control(struct ath_hw *ah,
41 enum ath9k_ani_cmd cmd, int param)
42{
43 struct ar5416AniState *aniState = ah->curani;
44 struct ath_common *common = ath9k_hw_common(ah);
45
46 switch (cmd & ah->ani_function) {
47 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
48 u32 level = param;
49
50 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
51 ath_print(common, ATH_DBG_ANI,
52 "level out of range (%u > %u)\n",
53 level,
54 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
55 return false;
56 }
57
58 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
59 AR_PHY_DESIRED_SZ_TOT_DES,
60 ah->totalSizeDesired[level]);
61 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
62 AR_PHY_AGC_CTL1_COARSE_LOW,
63 ah->coarse_low[level]);
64 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
65 AR_PHY_AGC_CTL1_COARSE_HIGH,
66 ah->coarse_high[level]);
67 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
68 AR_PHY_FIND_SIG_FIRPWR,
69 ah->firpwr[level]);
70
71 if (level > aniState->noiseImmunityLevel)
72 ah->stats.ast_ani_niup++;
73 else if (level < aniState->noiseImmunityLevel)
74 ah->stats.ast_ani_nidown++;
75 aniState->noiseImmunityLevel = level;
76 break;
77 }
78 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
79 const int m1ThreshLow[] = { 127, 50 };
80 const int m2ThreshLow[] = { 127, 40 };
81 const int m1Thresh[] = { 127, 0x4d };
82 const int m2Thresh[] = { 127, 0x40 };
83 const int m2CountThr[] = { 31, 16 };
84 const int m2CountThrLow[] = { 63, 48 };
85 u32 on = param ? 1 : 0;
86
87 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
88 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
89 m1ThreshLow[on]);
90 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
91 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
92 m2ThreshLow[on]);
93 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
94 AR_PHY_SFCORR_M1_THRESH,
95 m1Thresh[on]);
96 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
97 AR_PHY_SFCORR_M2_THRESH,
98 m2Thresh[on]);
99 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
100 AR_PHY_SFCORR_M2COUNT_THR,
101 m2CountThr[on]);
102 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
103 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
104 m2CountThrLow[on]);
105
106 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
107 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
108 m1ThreshLow[on]);
109 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
110 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
111 m2ThreshLow[on]);
112 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
113 AR_PHY_SFCORR_EXT_M1_THRESH,
114 m1Thresh[on]);
115 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
116 AR_PHY_SFCORR_EXT_M2_THRESH,
117 m2Thresh[on]);
118
119 if (on)
120 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
121 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
122 else
123 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
124 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
125
126 if (!on != aniState->ofdmWeakSigDetectOff) {
127 if (on)
128 ah->stats.ast_ani_ofdmon++;
129 else
130 ah->stats.ast_ani_ofdmoff++;
131 aniState->ofdmWeakSigDetectOff = !on;
132 }
133 break;
134 }
135 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
136 const int weakSigThrCck[] = { 8, 6 };
137 u32 high = param ? 1 : 0;
138
139 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
140 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
141 weakSigThrCck[high]);
142 if (high != aniState->cckWeakSigThreshold) {
143 if (high)
144 ah->stats.ast_ani_cckhigh++;
145 else
146 ah->stats.ast_ani_ccklow++;
147 aniState->cckWeakSigThreshold = high;
148 }
149 break;
150 }
151 case ATH9K_ANI_FIRSTEP_LEVEL:{
152 const int firstep[] = { 0, 4, 8 };
153 u32 level = param;
154
155 if (level >= ARRAY_SIZE(firstep)) {
156 ath_print(common, ATH_DBG_ANI,
157 "level out of range (%u > %u)\n",
158 level,
159 (unsigned) ARRAY_SIZE(firstep));
160 return false;
161 }
162 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
163 AR_PHY_FIND_SIG_FIRSTEP,
164 firstep[level]);
165 if (level > aniState->firstepLevel)
166 ah->stats.ast_ani_stepup++;
167 else if (level < aniState->firstepLevel)
168 ah->stats.ast_ani_stepdown++;
169 aniState->firstepLevel = level;
170 break;
171 }
172 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
173 const int cycpwrThr1[] =
174 { 2, 4, 6, 8, 10, 12, 14, 16 };
175 u32 level = param;
176
177 if (level >= ARRAY_SIZE(cycpwrThr1)) {
178 ath_print(common, ATH_DBG_ANI,
179 "level out of range (%u > %u)\n",
180 level,
181 (unsigned) ARRAY_SIZE(cycpwrThr1));
182 return false;
183 }
184 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
185 AR_PHY_TIMING5_CYCPWR_THR1,
186 cycpwrThr1[level]);
187 if (level > aniState->spurImmunityLevel)
188 ah->stats.ast_ani_spurup++;
189 else if (level < aniState->spurImmunityLevel)
190 ah->stats.ast_ani_spurdown++;
191 aniState->spurImmunityLevel = level;
192 break;
193 }
194 case ATH9K_ANI_PRESENT:
195 break;
196 default:
197 ath_print(common, ATH_DBG_ANI,
198 "invalid cmd %u\n", cmd);
199 return false;
200 }
201
202 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
203 ath_print(common, ATH_DBG_ANI,
204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
205 "ofdmWeakSigDetectOff=%d\n",
206 aniState->noiseImmunityLevel,
207 aniState->spurImmunityLevel,
208 !aniState->ofdmWeakSigDetectOff);
209 ath_print(common, ATH_DBG_ANI,
210 "cckWeakSigThreshold=%d, "
211 "firstepLevel=%d, listenTime=%d\n",
212 aniState->cckWeakSigThreshold,
213 aniState->firstepLevel,
214 aniState->listenTime);
215 ath_print(common, ATH_DBG_ANI,
216 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
217 aniState->cycleCount,
218 aniState->ofdmPhyErrCount,
219 aniState->cckPhyErrCount);
220
221 return true;
222}
223
224static void ath9k_hw_update_mibstats(struct ath_hw *ah, 41static void ath9k_hw_update_mibstats(struct ath_hw *ah,
225 struct ath9k_mib_stats *stats) 42 struct ath9k_mib_stats *stats)
226{ 43{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
262 "Writing ofdmbase=%u cckbase=%u\n", 79 "Writing ofdmbase=%u cckbase=%u\n",
263 aniState->ofdmPhyErrBase, 80 aniState->ofdmPhyErrBase,
264 aniState->cckPhyErrBase); 81 aniState->cckPhyErrBase);
82
83 ENABLE_REGWRITE_BUFFER(ah);
84
265 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); 85 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
266 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); 86 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
267 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 87 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
268 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 88 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
269 89
90 REGWRITE_BUFFER_FLUSH(ah);
91 DISABLE_REGWRITE_BUFFER(ah);
92
270 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 93 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
271 94
272 aniState->ofdmPhyErrCount = 0; 95 aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
540 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & 363 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
541 ~ATH9K_RX_FILTER_PHYERR); 364 ~ATH9K_RX_FILTER_PHYERR);
542 ath9k_ani_restart(ah); 365 ath9k_ani_restart(ah);
366
367 ENABLE_REGWRITE_BUFFER(ah);
368
543 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 369 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
544 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 370 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
371
372 REGWRITE_BUFFER_FLUSH(ah);
373 DISABLE_REGWRITE_BUFFER(ah);
545} 374}
546 375
547void ath9k_hw_ani_monitor(struct ath_hw *ah, 376void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
639 468
640 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 469 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
641 470
471 ENABLE_REGWRITE_BUFFER(ah);
472
642 REG_WRITE(ah, AR_FILT_OFDM, 0); 473 REG_WRITE(ah, AR_FILT_OFDM, 0);
643 REG_WRITE(ah, AR_FILT_CCK, 0); 474 REG_WRITE(ah, AR_FILT_CCK, 0);
644 REG_WRITE(ah, AR_MIBC, 475 REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
646 & 0x0f); 477 & 0x0f);
647 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 478 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
648 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 479 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
480
481 REGWRITE_BUFFER_FLUSH(ah);
482 DISABLE_REGWRITE_BUFFER(ah);
649} 483}
650 484
651/* Freeze the MIB counters, get the stats and then clear them */ 485/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
809 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", 643 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
810 ah->ani[0].cckPhyErrBase); 644 ah->ani[0].cckPhyErrBase);
811 645
646 ENABLE_REGWRITE_BUFFER(ah);
647
812 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase); 648 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
813 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase); 649 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
650
651 REGWRITE_BUFFER_FLUSH(ah);
652 DISABLE_REGWRITE_BUFFER(ah);
653
814 ath9k_enable_mib_counters(ah); 654 ath9k_enable_mib_counters(ah);
815 655
816 ah->aniperiod = ATH9K_ANI_PERIOD; 656 ah->aniperiod = ATH9K_ANI_PERIOD;
817 if (ah->config.enable_ani) 657 if (ah->config.enable_ani)
818 ah->proc_phyerr |= HAL_PROCESS_ANI; 658 ah->proc_phyerr |= HAL_PROCESS_ANI;
819} 659}
820
821void ath9k_hw_ani_disable(struct ath_hw *ah)
822{
823 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
824
825 ath9k_hw_disable_mib_counters(ah);
826 REG_WRITE(ah, AR_PHY_ERR_1, 0);
827 REG_WRITE(ah, AR_PHY_ERR_2, 0);
828}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94a5153..3356762ea384 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
118void ath9k_hw_procmibevent(struct ath_hw *ah); 118void ath9k_hw_procmibevent(struct ath_hw *ah);
119void ath9k_hw_ani_setup(struct ath_hw *ah); 119void ath9k_hw_ani_setup(struct ath_hw *ah);
120void ath9k_hw_ani_init(struct ath_hw *ah); 120void ath9k_hw_ani_init(struct ath_hw *ah);
121void ath9k_hw_ani_disable(struct ath_hw *ah);
122 121
123#endif /* ANI_H */ 122#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 000000000000..025c31ac6146
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_AR5008_H
18#define INITVALS_AR5008_H
19
20static const u32 ar5416Modes[][6] = {
21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
23 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
24 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
25 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
26 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
27 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
28 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
29 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
30 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
31 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
32 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
33 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
34 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
35 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
36 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
37 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
38 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
39 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
40 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
41 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
42 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
43 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
44 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
45 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
46 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
47 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
48 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
49 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
50 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
51 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
52 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
53 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
54 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
55 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
56 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
57 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
58 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
59 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
60 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
61 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
62 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
63 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
64 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
65 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
66 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
67 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
68 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
69 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
70 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
71 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
72 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
73 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
74 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
75 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
76 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
77 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
78 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
79 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
80 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
81 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
82 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
83 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
84};
85
86static const u32 ar5416Common[][2] = {
87 { 0x0000000c, 0x00000000 },
88 { 0x00000030, 0x00020015 },
89 { 0x00000034, 0x00000005 },
90 { 0x00000040, 0x00000000 },
91 { 0x00000044, 0x00000008 },
92 { 0x00000048, 0x00000008 },
93 { 0x0000004c, 0x00000010 },
94 { 0x00000050, 0x00000000 },
95 { 0x00000054, 0x0000001f },
96 { 0x00000800, 0x00000000 },
97 { 0x00000804, 0x00000000 },
98 { 0x00000808, 0x00000000 },
99 { 0x0000080c, 0x00000000 },
100 { 0x00000810, 0x00000000 },
101 { 0x00000814, 0x00000000 },
102 { 0x00000818, 0x00000000 },
103 { 0x0000081c, 0x00000000 },
104 { 0x00000820, 0x00000000 },
105 { 0x00000824, 0x00000000 },
106 { 0x00001040, 0x002ffc0f },
107 { 0x00001044, 0x002ffc0f },
108 { 0x00001048, 0x002ffc0f },
109 { 0x0000104c, 0x002ffc0f },
110 { 0x00001050, 0x002ffc0f },
111 { 0x00001054, 0x002ffc0f },
112 { 0x00001058, 0x002ffc0f },
113 { 0x0000105c, 0x002ffc0f },
114 { 0x00001060, 0x002ffc0f },
115 { 0x00001064, 0x002ffc0f },
116 { 0x00001230, 0x00000000 },
117 { 0x00001270, 0x00000000 },
118 { 0x00001038, 0x00000000 },
119 { 0x00001078, 0x00000000 },
120 { 0x000010b8, 0x00000000 },
121 { 0x000010f8, 0x00000000 },
122 { 0x00001138, 0x00000000 },
123 { 0x00001178, 0x00000000 },
124 { 0x000011b8, 0x00000000 },
125 { 0x000011f8, 0x00000000 },
126 { 0x00001238, 0x00000000 },
127 { 0x00001278, 0x00000000 },
128 { 0x000012b8, 0x00000000 },
129 { 0x000012f8, 0x00000000 },
130 { 0x00001338, 0x00000000 },
131 { 0x00001378, 0x00000000 },
132 { 0x000013b8, 0x00000000 },
133 { 0x000013f8, 0x00000000 },
134 { 0x00001438, 0x00000000 },
135 { 0x00001478, 0x00000000 },
136 { 0x000014b8, 0x00000000 },
137 { 0x000014f8, 0x00000000 },
138 { 0x00001538, 0x00000000 },
139 { 0x00001578, 0x00000000 },
140 { 0x000015b8, 0x00000000 },
141 { 0x000015f8, 0x00000000 },
142 { 0x00001638, 0x00000000 },
143 { 0x00001678, 0x00000000 },
144 { 0x000016b8, 0x00000000 },
145 { 0x000016f8, 0x00000000 },
146 { 0x00001738, 0x00000000 },
147 { 0x00001778, 0x00000000 },
148 { 0x000017b8, 0x00000000 },
149 { 0x000017f8, 0x00000000 },
150 { 0x0000103c, 0x00000000 },
151 { 0x0000107c, 0x00000000 },
152 { 0x000010bc, 0x00000000 },
153 { 0x000010fc, 0x00000000 },
154 { 0x0000113c, 0x00000000 },
155 { 0x0000117c, 0x00000000 },
156 { 0x000011bc, 0x00000000 },
157 { 0x000011fc, 0x00000000 },
158 { 0x0000123c, 0x00000000 },
159 { 0x0000127c, 0x00000000 },
160 { 0x000012bc, 0x00000000 },
161 { 0x000012fc, 0x00000000 },
162 { 0x0000133c, 0x00000000 },
163 { 0x0000137c, 0x00000000 },
164 { 0x000013bc, 0x00000000 },
165 { 0x000013fc, 0x00000000 },
166 { 0x0000143c, 0x00000000 },
167 { 0x0000147c, 0x00000000 },
168 { 0x00004030, 0x00000002 },
169 { 0x0000403c, 0x00000002 },
170 { 0x00007010, 0x00000000 },
171 { 0x00007038, 0x000004c2 },
172 { 0x00008004, 0x00000000 },
173 { 0x00008008, 0x00000000 },
174 { 0x0000800c, 0x00000000 },
175 { 0x00008018, 0x00000700 },
176 { 0x00008020, 0x00000000 },
177 { 0x00008038, 0x00000000 },
178 { 0x0000803c, 0x00000000 },
179 { 0x00008048, 0x40000000 },
180 { 0x00008054, 0x00000000 },
181 { 0x00008058, 0x00000000 },
182 { 0x0000805c, 0x000fc78f },
183 { 0x00008060, 0x0000000f },
184 { 0x00008064, 0x00000000 },
185 { 0x000080c0, 0x2a82301a },
186 { 0x000080c4, 0x05dc01e0 },
187 { 0x000080c8, 0x1f402710 },
188 { 0x000080cc, 0x01f40000 },
189 { 0x000080d0, 0x00001e00 },
190 { 0x000080d4, 0x00000000 },
191 { 0x000080d8, 0x00400000 },
192 { 0x000080e0, 0xffffffff },
193 { 0x000080e4, 0x0000ffff },
194 { 0x000080e8, 0x003f3f3f },
195 { 0x000080ec, 0x00000000 },
196 { 0x000080f0, 0x00000000 },
197 { 0x000080f4, 0x00000000 },
198 { 0x000080f8, 0x00000000 },
199 { 0x000080fc, 0x00020000 },
200 { 0x00008100, 0x00020000 },
201 { 0x00008104, 0x00000001 },
202 { 0x00008108, 0x00000052 },
203 { 0x0000810c, 0x00000000 },
204 { 0x00008110, 0x00000168 },
205 { 0x00008118, 0x000100aa },
206 { 0x0000811c, 0x00003210 },
207 { 0x00008124, 0x00000000 },
208 { 0x00008128, 0x00000000 },
209 { 0x0000812c, 0x00000000 },
210 { 0x00008130, 0x00000000 },
211 { 0x00008134, 0x00000000 },
212 { 0x00008138, 0x00000000 },
213 { 0x0000813c, 0x00000000 },
214 { 0x00008144, 0xffffffff },
215 { 0x00008168, 0x00000000 },
216 { 0x0000816c, 0x00000000 },
217 { 0x00008170, 0x32143320 },
218 { 0x00008174, 0xfaa4fa50 },
219 { 0x00008178, 0x00000100 },
220 { 0x0000817c, 0x00000000 },
221 { 0x000081c4, 0x00000000 },
222 { 0x000081ec, 0x00000000 },
223 { 0x000081f0, 0x00000000 },
224 { 0x000081f4, 0x00000000 },
225 { 0x000081f8, 0x00000000 },
226 { 0x000081fc, 0x00000000 },
227 { 0x00008200, 0x00000000 },
228 { 0x00008204, 0x00000000 },
229 { 0x00008208, 0x00000000 },
230 { 0x0000820c, 0x00000000 },
231 { 0x00008210, 0x00000000 },
232 { 0x00008214, 0x00000000 },
233 { 0x00008218, 0x00000000 },
234 { 0x0000821c, 0x00000000 },
235 { 0x00008220, 0x00000000 },
236 { 0x00008224, 0x00000000 },
237 { 0x00008228, 0x00000000 },
238 { 0x0000822c, 0x00000000 },
239 { 0x00008230, 0x00000000 },
240 { 0x00008234, 0x00000000 },
241 { 0x00008238, 0x00000000 },
242 { 0x0000823c, 0x00000000 },
243 { 0x00008240, 0x00100000 },
244 { 0x00008244, 0x0010f400 },
245 { 0x00008248, 0x00000100 },
246 { 0x0000824c, 0x0001e800 },
247 { 0x00008250, 0x00000000 },
248 { 0x00008254, 0x00000000 },
249 { 0x00008258, 0x00000000 },
250 { 0x0000825c, 0x400000ff },
251 { 0x00008260, 0x00080922 },
252 { 0x00008264, 0x88000010 },
253 { 0x00008270, 0x00000000 },
254 { 0x00008274, 0x40000000 },
255 { 0x00008278, 0x003e4180 },
256 { 0x0000827c, 0x00000000 },
257 { 0x00008284, 0x0000002c },
258 { 0x00008288, 0x0000002c },
259 { 0x0000828c, 0x00000000 },
260 { 0x00008294, 0x00000000 },
261 { 0x00008298, 0x00000000 },
262 { 0x00008300, 0x00000000 },
263 { 0x00008304, 0x00000000 },
264 { 0x00008308, 0x00000000 },
265 { 0x0000830c, 0x00000000 },
266 { 0x00008310, 0x00000000 },
267 { 0x00008314, 0x00000000 },
268 { 0x00008318, 0x00000000 },
269 { 0x00008328, 0x00000000 },
270 { 0x0000832c, 0x00000007 },
271 { 0x00008330, 0x00000302 },
272 { 0x00008334, 0x00000e00 },
273 { 0x00008338, 0x00070000 },
274 { 0x0000833c, 0x00000000 },
275 { 0x00008340, 0x000107ff },
276 { 0x00009808, 0x00000000 },
277 { 0x0000980c, 0xad848e19 },
278 { 0x00009810, 0x7d14e000 },
279 { 0x00009814, 0x9c0a9f6b },
280 { 0x0000981c, 0x00000000 },
281 { 0x0000982c, 0x0000a000 },
282 { 0x00009830, 0x00000000 },
283 { 0x0000983c, 0x00200400 },
284 { 0x00009840, 0x206a002e },
285 { 0x0000984c, 0x1284233c },
286 { 0x00009854, 0x00000859 },
287 { 0x00009900, 0x00000000 },
288 { 0x00009904, 0x00000000 },
289 { 0x00009908, 0x00000000 },
290 { 0x0000990c, 0x00000000 },
291 { 0x0000991c, 0x10000fff },
292 { 0x00009920, 0x05100000 },
293 { 0x0000a920, 0x05100000 },
294 { 0x0000b920, 0x05100000 },
295 { 0x00009928, 0x00000001 },
296 { 0x0000992c, 0x00000004 },
297 { 0x00009934, 0x1e1f2022 },
298 { 0x00009938, 0x0a0b0c0d },
299 { 0x0000993c, 0x00000000 },
300 { 0x00009948, 0x9280b212 },
301 { 0x0000994c, 0x00020028 },
302 { 0x00009954, 0x5d50e188 },
303 { 0x00009958, 0x00081fff },
304 { 0x0000c95c, 0x004b6a8e },
305 { 0x0000c968, 0x000003ce },
306 { 0x00009970, 0x190fb515 },
307 { 0x00009974, 0x00000000 },
308 { 0x00009978, 0x00000001 },
309 { 0x0000997c, 0x00000000 },
310 { 0x00009980, 0x00000000 },
311 { 0x00009984, 0x00000000 },
312 { 0x00009988, 0x00000000 },
313 { 0x0000998c, 0x00000000 },
314 { 0x00009990, 0x00000000 },
315 { 0x00009994, 0x00000000 },
316 { 0x00009998, 0x00000000 },
317 { 0x0000999c, 0x00000000 },
318 { 0x000099a0, 0x00000000 },
319 { 0x000099a4, 0x00000001 },
320 { 0x000099a8, 0x001fff00 },
321 { 0x000099ac, 0x00000000 },
322 { 0x000099b0, 0x03051000 },
323 { 0x000099dc, 0x00000000 },
324 { 0x000099e0, 0x00000200 },
325 { 0x000099e4, 0xaaaaaaaa },
326 { 0x000099e8, 0x3c466478 },
327 { 0x000099ec, 0x000000aa },
328 { 0x000099fc, 0x00001042 },
329 { 0x00009b00, 0x00000000 },
330 { 0x00009b04, 0x00000001 },
331 { 0x00009b08, 0x00000002 },
332 { 0x00009b0c, 0x00000003 },
333 { 0x00009b10, 0x00000004 },
334 { 0x00009b14, 0x00000005 },
335 { 0x00009b18, 0x00000008 },
336 { 0x00009b1c, 0x00000009 },
337 { 0x00009b20, 0x0000000a },
338 { 0x00009b24, 0x0000000b },
339 { 0x00009b28, 0x0000000c },
340 { 0x00009b2c, 0x0000000d },
341 { 0x00009b30, 0x00000010 },
342 { 0x00009b34, 0x00000011 },
343 { 0x00009b38, 0x00000012 },
344 { 0x00009b3c, 0x00000013 },
345 { 0x00009b40, 0x00000014 },
346 { 0x00009b44, 0x00000015 },
347 { 0x00009b48, 0x00000018 },
348 { 0x00009b4c, 0x00000019 },
349 { 0x00009b50, 0x0000001a },
350 { 0x00009b54, 0x0000001b },
351 { 0x00009b58, 0x0000001c },
352 { 0x00009b5c, 0x0000001d },
353 { 0x00009b60, 0x00000020 },
354 { 0x00009b64, 0x00000021 },
355 { 0x00009b68, 0x00000022 },
356 { 0x00009b6c, 0x00000023 },
357 { 0x00009b70, 0x00000024 },
358 { 0x00009b74, 0x00000025 },
359 { 0x00009b78, 0x00000028 },
360 { 0x00009b7c, 0x00000029 },
361 { 0x00009b80, 0x0000002a },
362 { 0x00009b84, 0x0000002b },
363 { 0x00009b88, 0x0000002c },
364 { 0x00009b8c, 0x0000002d },
365 { 0x00009b90, 0x00000030 },
366 { 0x00009b94, 0x00000031 },
367 { 0x00009b98, 0x00000032 },
368 { 0x00009b9c, 0x00000033 },
369 { 0x00009ba0, 0x00000034 },
370 { 0x00009ba4, 0x00000035 },
371 { 0x00009ba8, 0x00000035 },
372 { 0x00009bac, 0x00000035 },
373 { 0x00009bb0, 0x00000035 },
374 { 0x00009bb4, 0x00000035 },
375 { 0x00009bb8, 0x00000035 },
376 { 0x00009bbc, 0x00000035 },
377 { 0x00009bc0, 0x00000035 },
378 { 0x00009bc4, 0x00000035 },
379 { 0x00009bc8, 0x00000035 },
380 { 0x00009bcc, 0x00000035 },
381 { 0x00009bd0, 0x00000035 },
382 { 0x00009bd4, 0x00000035 },
383 { 0x00009bd8, 0x00000035 },
384 { 0x00009bdc, 0x00000035 },
385 { 0x00009be0, 0x00000035 },
386 { 0x00009be4, 0x00000035 },
387 { 0x00009be8, 0x00000035 },
388 { 0x00009bec, 0x00000035 },
389 { 0x00009bf0, 0x00000035 },
390 { 0x00009bf4, 0x00000035 },
391 { 0x00009bf8, 0x00000010 },
392 { 0x00009bfc, 0x0000001a },
393 { 0x0000a210, 0x40806333 },
394 { 0x0000a214, 0x00106c10 },
395 { 0x0000a218, 0x009c4060 },
396 { 0x0000a220, 0x018830c6 },
397 { 0x0000a224, 0x00000400 },
398 { 0x0000a228, 0x00000bb5 },
399 { 0x0000a22c, 0x00000011 },
400 { 0x0000a234, 0x20202020 },
401 { 0x0000a238, 0x20202020 },
402 { 0x0000a23c, 0x13c889af },
403 { 0x0000a240, 0x38490a20 },
404 { 0x0000a244, 0x00007bb6 },
405 { 0x0000a248, 0x0fff3ffc },
406 { 0x0000a24c, 0x00000001 },
407 { 0x0000a250, 0x0000a000 },
408 { 0x0000a254, 0x00000000 },
409 { 0x0000a258, 0x0cc75380 },
410 { 0x0000a25c, 0x0f0f0f01 },
411 { 0x0000a260, 0xdfa91f01 },
412 { 0x0000a268, 0x00000000 },
413 { 0x0000a26c, 0x0e79e5c6 },
414 { 0x0000b26c, 0x0e79e5c6 },
415 { 0x0000c26c, 0x0e79e5c6 },
416 { 0x0000d270, 0x00820820 },
417 { 0x0000a278, 0x1ce739ce },
418 { 0x0000a27c, 0x051701ce },
419 { 0x0000a338, 0x00000000 },
420 { 0x0000a33c, 0x00000000 },
421 { 0x0000a340, 0x00000000 },
422 { 0x0000a344, 0x00000000 },
423 { 0x0000a348, 0x3fffffff },
424 { 0x0000a34c, 0x3fffffff },
425 { 0x0000a350, 0x3fffffff },
426 { 0x0000a354, 0x0003ffff },
427 { 0x0000a358, 0x79a8aa1f },
428 { 0x0000d35c, 0x07ffffef },
429 { 0x0000d360, 0x0fffffe7 },
430 { 0x0000d364, 0x17ffffe5 },
431 { 0x0000d368, 0x1fffffe4 },
432 { 0x0000d36c, 0x37ffffe3 },
433 { 0x0000d370, 0x3fffffe3 },
434 { 0x0000d374, 0x57ffffe3 },
435 { 0x0000d378, 0x5fffffe2 },
436 { 0x0000d37c, 0x7fffffe2 },
437 { 0x0000d380, 0x7f3c7bba },
438 { 0x0000d384, 0xf3307ff0 },
439 { 0x0000a388, 0x08000000 },
440 { 0x0000a38c, 0x20202020 },
441 { 0x0000a390, 0x20202020 },
442 { 0x0000a394, 0x1ce739ce },
443 { 0x0000a398, 0x000001ce },
444 { 0x0000a39c, 0x00000001 },
445 { 0x0000a3a0, 0x00000000 },
446 { 0x0000a3a4, 0x00000000 },
447 { 0x0000a3a8, 0x00000000 },
448 { 0x0000a3ac, 0x00000000 },
449 { 0x0000a3b0, 0x00000000 },
450 { 0x0000a3b4, 0x00000000 },
451 { 0x0000a3b8, 0x00000000 },
452 { 0x0000a3bc, 0x00000000 },
453 { 0x0000a3c0, 0x00000000 },
454 { 0x0000a3c4, 0x00000000 },
455 { 0x0000a3c8, 0x00000246 },
456 { 0x0000a3cc, 0x20202020 },
457 { 0x0000a3d0, 0x20202020 },
458 { 0x0000a3d4, 0x20202020 },
459 { 0x0000a3dc, 0x1ce739ce },
460 { 0x0000a3e0, 0x000001ce },
461};
462
463static const u32 ar5416Bank0[][2] = {
464 { 0x000098b0, 0x1e5795e5 },
465 { 0x000098e0, 0x02008020 },
466};
467
468static const u32 ar5416BB_RfGain[][3] = {
469 { 0x00009a00, 0x00000000, 0x00000000 },
470 { 0x00009a04, 0x00000040, 0x00000040 },
471 { 0x00009a08, 0x00000080, 0x00000080 },
472 { 0x00009a0c, 0x000001a1, 0x00000141 },
473 { 0x00009a10, 0x000001e1, 0x00000181 },
474 { 0x00009a14, 0x00000021, 0x000001c1 },
475 { 0x00009a18, 0x00000061, 0x00000001 },
476 { 0x00009a1c, 0x00000168, 0x00000041 },
477 { 0x00009a20, 0x000001a8, 0x000001a8 },
478 { 0x00009a24, 0x000001e8, 0x000001e8 },
479 { 0x00009a28, 0x00000028, 0x00000028 },
480 { 0x00009a2c, 0x00000068, 0x00000068 },
481 { 0x00009a30, 0x00000189, 0x000000a8 },
482 { 0x00009a34, 0x000001c9, 0x00000169 },
483 { 0x00009a38, 0x00000009, 0x000001a9 },
484 { 0x00009a3c, 0x00000049, 0x000001e9 },
485 { 0x00009a40, 0x00000089, 0x00000029 },
486 { 0x00009a44, 0x00000170, 0x00000069 },
487 { 0x00009a48, 0x000001b0, 0x00000190 },
488 { 0x00009a4c, 0x000001f0, 0x000001d0 },
489 { 0x00009a50, 0x00000030, 0x00000010 },
490 { 0x00009a54, 0x00000070, 0x00000050 },
491 { 0x00009a58, 0x00000191, 0x00000090 },
492 { 0x00009a5c, 0x000001d1, 0x00000151 },
493 { 0x00009a60, 0x00000011, 0x00000191 },
494 { 0x00009a64, 0x00000051, 0x000001d1 },
495 { 0x00009a68, 0x00000091, 0x00000011 },
496 { 0x00009a6c, 0x000001b8, 0x00000051 },
497 { 0x00009a70, 0x000001f8, 0x00000198 },
498 { 0x00009a74, 0x00000038, 0x000001d8 },
499 { 0x00009a78, 0x00000078, 0x00000018 },
500 { 0x00009a7c, 0x00000199, 0x00000058 },
501 { 0x00009a80, 0x000001d9, 0x00000098 },
502 { 0x00009a84, 0x00000019, 0x00000159 },
503 { 0x00009a88, 0x00000059, 0x00000199 },
504 { 0x00009a8c, 0x00000099, 0x000001d9 },
505 { 0x00009a90, 0x000000d9, 0x00000019 },
506 { 0x00009a94, 0x000000f9, 0x00000059 },
507 { 0x00009a98, 0x000000f9, 0x00000099 },
508 { 0x00009a9c, 0x000000f9, 0x000000d9 },
509 { 0x00009aa0, 0x000000f9, 0x000000f9 },
510 { 0x00009aa4, 0x000000f9, 0x000000f9 },
511 { 0x00009aa8, 0x000000f9, 0x000000f9 },
512 { 0x00009aac, 0x000000f9, 0x000000f9 },
513 { 0x00009ab0, 0x000000f9, 0x000000f9 },
514 { 0x00009ab4, 0x000000f9, 0x000000f9 },
515 { 0x00009ab8, 0x000000f9, 0x000000f9 },
516 { 0x00009abc, 0x000000f9, 0x000000f9 },
517 { 0x00009ac0, 0x000000f9, 0x000000f9 },
518 { 0x00009ac4, 0x000000f9, 0x000000f9 },
519 { 0x00009ac8, 0x000000f9, 0x000000f9 },
520 { 0x00009acc, 0x000000f9, 0x000000f9 },
521 { 0x00009ad0, 0x000000f9, 0x000000f9 },
522 { 0x00009ad4, 0x000000f9, 0x000000f9 },
523 { 0x00009ad8, 0x000000f9, 0x000000f9 },
524 { 0x00009adc, 0x000000f9, 0x000000f9 },
525 { 0x00009ae0, 0x000000f9, 0x000000f9 },
526 { 0x00009ae4, 0x000000f9, 0x000000f9 },
527 { 0x00009ae8, 0x000000f9, 0x000000f9 },
528 { 0x00009aec, 0x000000f9, 0x000000f9 },
529 { 0x00009af0, 0x000000f9, 0x000000f9 },
530 { 0x00009af4, 0x000000f9, 0x000000f9 },
531 { 0x00009af8, 0x000000f9, 0x000000f9 },
532 { 0x00009afc, 0x000000f9, 0x000000f9 },
533};
534
535static const u32 ar5416Bank1[][2] = {
536 { 0x000098b0, 0x02108421 },
537 { 0x000098ec, 0x00000008 },
538};
539
540static const u32 ar5416Bank2[][2] = {
541 { 0x000098b0, 0x0e73ff17 },
542 { 0x000098e0, 0x00000420 },
543};
544
545static const u32 ar5416Bank3[][3] = {
546 { 0x000098f0, 0x01400018, 0x01c00018 },
547};
548
549static const u32 ar5416Bank6[][3] = {
550
551 { 0x0000989c, 0x00000000, 0x00000000 },
552 { 0x0000989c, 0x00000000, 0x00000000 },
553 { 0x0000989c, 0x00000000, 0x00000000 },
554 { 0x0000989c, 0x00e00000, 0x00e00000 },
555 { 0x0000989c, 0x005e0000, 0x005e0000 },
556 { 0x0000989c, 0x00120000, 0x00120000 },
557 { 0x0000989c, 0x00620000, 0x00620000 },
558 { 0x0000989c, 0x00020000, 0x00020000 },
559 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
560 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
561 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
562 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
563 { 0x0000989c, 0x005f0000, 0x005f0000 },
564 { 0x0000989c, 0x00870000, 0x00870000 },
565 { 0x0000989c, 0x00f90000, 0x00f90000 },
566 { 0x0000989c, 0x007b0000, 0x007b0000 },
567 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
568 { 0x0000989c, 0x00f50000, 0x00f50000 },
569 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
570 { 0x0000989c, 0x00110000, 0x00110000 },
571 { 0x0000989c, 0x006100a8, 0x006100a8 },
572 { 0x0000989c, 0x004210a2, 0x004210a2 },
573 { 0x0000989c, 0x0014008f, 0x0014008f },
574 { 0x0000989c, 0x00c40003, 0x00c40003 },
575 { 0x0000989c, 0x003000f2, 0x003000f2 },
576 { 0x0000989c, 0x00440016, 0x00440016 },
577 { 0x0000989c, 0x00410040, 0x00410040 },
578 { 0x0000989c, 0x0001805e, 0x0001805e },
579 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
580 { 0x0000989c, 0x000000f1, 0x000000f1 },
581 { 0x0000989c, 0x00002081, 0x00002081 },
582 { 0x0000989c, 0x000000d4, 0x000000d4 },
583 { 0x000098d0, 0x0000000f, 0x0010000f },
584};
585
586static const u32 ar5416Bank6TPC[][3] = {
587 { 0x0000989c, 0x00000000, 0x00000000 },
588 { 0x0000989c, 0x00000000, 0x00000000 },
589 { 0x0000989c, 0x00000000, 0x00000000 },
590 { 0x0000989c, 0x00e00000, 0x00e00000 },
591 { 0x0000989c, 0x005e0000, 0x005e0000 },
592 { 0x0000989c, 0x00120000, 0x00120000 },
593 { 0x0000989c, 0x00620000, 0x00620000 },
594 { 0x0000989c, 0x00020000, 0x00020000 },
595 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
596 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
597 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
598 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
599 { 0x0000989c, 0x005f0000, 0x005f0000 },
600 { 0x0000989c, 0x00870000, 0x00870000 },
601 { 0x0000989c, 0x00f90000, 0x00f90000 },
602 { 0x0000989c, 0x007b0000, 0x007b0000 },
603 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
604 { 0x0000989c, 0x00f50000, 0x00f50000 },
605 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
606 { 0x0000989c, 0x00110000, 0x00110000 },
607 { 0x0000989c, 0x006100a8, 0x006100a8 },
608 { 0x0000989c, 0x00423022, 0x00423022 },
609 { 0x0000989c, 0x201400df, 0x201400df },
610 { 0x0000989c, 0x00c40002, 0x00c40002 },
611 { 0x0000989c, 0x003000f2, 0x003000f2 },
612 { 0x0000989c, 0x00440016, 0x00440016 },
613 { 0x0000989c, 0x00410040, 0x00410040 },
614 { 0x0000989c, 0x0001805e, 0x0001805e },
615 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
616 { 0x0000989c, 0x000000e1, 0x000000e1 },
617 { 0x0000989c, 0x00007081, 0x00007081 },
618 { 0x0000989c, 0x000000d4, 0x000000d4 },
619 { 0x000098d0, 0x0000000f, 0x0010000f },
620};
621
622static const u32 ar5416Bank7[][2] = {
623 { 0x0000989c, 0x00000500 },
624 { 0x0000989c, 0x00000800 },
625 { 0x000098cc, 0x0000000e },
626};
627
628static const u32 ar5416Addac[][2] = {
629 {0x0000989c, 0x00000000 },
630 {0x0000989c, 0x00000003 },
631 {0x0000989c, 0x00000000 },
632 {0x0000989c, 0x0000000c },
633 {0x0000989c, 0x00000000 },
634 {0x0000989c, 0x00000030 },
635 {0x0000989c, 0x00000000 },
636 {0x0000989c, 0x00000000 },
637 {0x0000989c, 0x00000000 },
638 {0x0000989c, 0x00000000 },
639 {0x0000989c, 0x00000000 },
640 {0x0000989c, 0x00000000 },
641 {0x0000989c, 0x00000000 },
642 {0x0000989c, 0x00000000 },
643 {0x0000989c, 0x00000000 },
644 {0x0000989c, 0x00000000 },
645 {0x0000989c, 0x00000000 },
646 {0x0000989c, 0x00000000 },
647 {0x0000989c, 0x00000060 },
648 {0x0000989c, 0x00000000 },
649 {0x0000989c, 0x00000000 },
650 {0x0000989c, 0x00000000 },
651 {0x0000989c, 0x00000000 },
652 {0x0000989c, 0x00000000 },
653 {0x0000989c, 0x00000000 },
654 {0x0000989c, 0x00000000 },
655 {0x0000989c, 0x00000000 },
656 {0x0000989c, 0x00000000 },
657 {0x0000989c, 0x00000000 },
658 {0x0000989c, 0x00000000 },
659 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000058 },
661 {0x0000989c, 0x00000000 },
662 {0x0000989c, 0x00000000 },
663 {0x0000989c, 0x00000000 },
664 {0x0000989c, 0x00000000 },
665 {0x000098cc, 0x00000000 },
666};
667
668static const u32 ar5416Modes_9100[][6] = {
669 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
670 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
671 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
672 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
673 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
674 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
675 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
676 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
677 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
678 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
679 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
680 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
681 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
682 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
683 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
684 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
685 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
686 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
687 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
688 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
689 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
690 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
691 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
692 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
693 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
694 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
695 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
696 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
697 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
698 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
699#ifdef TB243
700 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
701 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
702 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
703 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
704#else
705 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
706 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
707 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
708 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
709#endif
710 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
711 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
712 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
713 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
714 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
715 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
716 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
717 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
718 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
719 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
720 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
721 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
722 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
723 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
724 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
725 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
726 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
727 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
728 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
729 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
730 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
731 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
732 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
733 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
734 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
735 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
736 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
737 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
738 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
739 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
740};
741
742#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 000000000000..b2c17c98bb38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "../regd.h"
20#include "ar9002_phy.h"
21
22/* All code below is for non single-chip solutions */
23
24/**
25 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
26 * @rfBuf: RF bank scratch buffer, modified in place
27 * @reg32: value whose low @numBits bits are inserted (bit-reversed first)
28 * @numBits: number of bits of @reg32 to insert
29 * @firstBit: 1-based bit position of the field within the bank
30 * @column: byte column (0-3) inside each 32-bit bank word
31 *
32 * Performs analog "swizzling" of parameters into their location.
33 * Used on external AR2133/AR5133 radios.
34 */
35static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
36					   u32 numBits, u32 firstBit,
37					   u32 column)
38{
39	u32 tmp32, mask, arrayEntry, lastBit;
40	int32_t bitPosition, bitsLeft;
41
	/* The radio consumes the field bit-reversed. */
42	tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
43	arrayEntry = (firstBit - 1) / 8;
44	bitPosition = (firstBit - 1) % 8;
45	bitsLeft = numBits;
	/* Write at most 8 bits into each bank entry until the field is done. */
46	while (bitsLeft > 0) {
47		lastBit = (bitPosition + bitsLeft > 8) ?
48		    8 : bitPosition + bitsLeft;
49		mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
50		    (column * 8);
51		rfBuf[arrayEntry] &= ~mask;
52		rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
53				      (column * 8)) & mask;
54		bitsLeft -= 8 - bitPosition;
55		tmp32 = tmp32 >> (8 - bitPosition);
56		bitPosition = 0;
57		arrayEntry++;
58	}
59}
60
61/*
62 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
63 * rf_pwd_icsyndiv.
64 *
65 * Theoretical Rules:
66 * if 2 GHz band
67 * if forceBiasAuto
68 * if synth_freq < 2412
69 * bias = 0
70 * else if 2412 <= synth_freq <= 2422
71 * bias = 1
72 * else // synth_freq > 2422
73 * bias = 2
74 * else if forceBias > 0
75 * bias = forceBias & 7
76 * else
77 * no change, use value from ini file
78 * else
79 * no change, invalid band
80 *
81 * 1st Mod:
82 * 2422 also uses value of 2
83 * <approved>
84 *
85 * 2nd Mod:
86 * Less than 2412 uses value of 0, 2412 and above uses value of 2
87 */
88static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
89{
90	struct ath_common *common = ath9k_hw_common(ah);
91	u32 tmp_reg;
92	int reg_writes = 0;
93	u32 new_bias = 0;
94
	/* Workaround applies only to AR5416 on the 2.4 GHz band. */
95	if (!AR_SREV_5416(ah) || synth_freq >= 3000)
96		return;
97
98	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
99
	/* 2nd-mod rule from the comment above: <2412 -> 0, else 1 or 2 */
100	if (synth_freq < 2412)
101		new_bias = 0;
102	else if (synth_freq < 2422)
103		new_bias = 1;
104	else
105		new_bias = 2;
106
107	/* pre-reverse this field */
108	tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
109
110	ath_print(common, ATH_DBG_CONFIG,
111		  "Force rf_pwd_icsyndiv to %1d on %4d\n",
112		  new_bias, synth_freq);
113
114	/* swizzle rf_pwd_icsyndiv */
115	ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
116
117	/* write Bank 6 with new params */
118	REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
119}
120
121/**
122 * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
123 * @ah: atheros hardware structure
124 * @chan: channel to tune to
125 *
126 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
127 * the channel value. Assumes writes enabled to analog bus and bank6 register
128 * cache in ah->analogBank6Data.
129 *
 * Returns 0 on success, -EINVAL for an unsupported synth frequency.
 */
130static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
131{
132	struct ath_common *common = ath9k_hw_common(ah);
133	u32 channelSel = 0;
134	u32 bModeSynth = 0;
135	u32 aModeRefSel = 0;
136	u32 reg32 = 0;
137	u16 freq;
138	struct chan_centers centers;
139
140	ath9k_hw_get_channel_centers(ah, chan, &centers);
141	freq = centers.synth_center;
142
	/* 2.4 GHz (CCK-capable) path */
143	if (freq < 4800) {
144		u32 txctl;
145
146		if (((freq - 2192) % 5) == 0) {
147			channelSel = ((freq - 672) * 2 - 3040) / 10;
148			bModeSynth = 0;
149		} else if (((freq - 2224) % 5) == 0) {
150			channelSel = ((freq - 704) * 2 - 3040) / 10;
151			bModeSynth = 1;
152		} else {
153			ath_print(common, ATH_DBG_FATAL,
154				  "Invalid channel %u MHz\n", freq);
155			return -EINVAL;
156		}
157
158		channelSel = (channelSel << 2) & 0xff;
159		channelSel = ath9k_hw_reverse_bits(channelSel, 8);
160
		/* Channel 14 (2484 MHz) needs the Japan CCK TX filter */
161		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
162		if (freq == 2484) {
163
164			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
165				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
166		} else {
167			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
168				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
169		}
170
	/* 5 GHz paths, distinguished by channel spacing */
171	} else if ((freq % 20) == 0 && freq >= 5120) {
172		channelSel =
173		    ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
174		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
175	} else if ((freq % 10) == 0) {
176		channelSel =
177		    ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
178		if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
179			aModeRefSel = ath9k_hw_reverse_bits(2, 2);
180		else
181			aModeRefSel = ath9k_hw_reverse_bits(1, 2);
182	} else if ((freq % 5) == 0) {
183		channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
184		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
185	} else {
186		ath_print(common, ATH_DBG_FATAL,
187			  "Invalid channel %u MHz\n", freq);
188		return -EINVAL;
189	}
190
191	ar5008_hw_force_bias(ah, freq);
192
	/* Compose the synthesizer programming word */
193	reg32 =
194	    (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
195	    (1 << 5) | 0x1;
196
197	REG_WRITE(ah, AR_PHY(0x37), reg32);
198
199	ah->curchan = chan;
200	ah->curchan_rad_index = -1;
201
202	return 0;
203}
204
205/**
206 * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
207 * @ah: atheros hardware structure
208 * @chan: channel whose spur frequencies are to be mitigated
209 *
210 * For non single-chip solutions. Converts to baseband spur frequency given the
211 * input channel frequency and compute register settings below.
212 */
213static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
214				    struct ath9k_channel *chan)
215{
216	int bb_spur = AR_NO_SPUR;
217	int bin, cur_bin;
218	int spur_freq_sd;
219	int spur_delta_phase;
220	int denominator;
221	int upper, lower, cur_vit_mask;
222	int tmp, new;
223	int i;
224	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
225		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
226	};
227	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
228		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
229	};
230	int inc[4] = { 0, 100, 0, 0 };
231
232	int8_t mask_m[123];
233	int8_t mask_p[123];
234	int8_t mask_amt;
235	int tmp_mask;
236	int cur_bb_spur;
237	bool is2GHz = IS_CHAN_2GHZ(chan);
238
239	memset(&mask_m, 0, sizeof(int8_t) * 123);
240	memset(&mask_p, 0, sizeof(int8_t) * 123);
241
	/* Pick the first EEPROM spur within +/-9.5 MHz of the channel */
242	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
243		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
244		if (AR_NO_SPUR == cur_bb_spur)
245			break;
246		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
247		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
248			bb_spur = cur_bb_spur;
249			break;
250		}
251	}
252
253	if (AR_NO_SPUR == bb_spur)
254		return;
255
256	bin = bb_spur * 32;
257
258	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
259	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
260		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
261		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
262		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
263
264	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
265
266	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
267	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
268	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
269	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
270	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
271	REG_WRITE(ah, AR_PHY_SPUR_REG, new);
272
273	spur_delta_phase = ((bb_spur * 524288) / 100) &
274		AR_PHY_TIMING11_SPUR_DELTA_PHASE;
275
276	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
277	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
278
279	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
280	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
281	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
282	REG_WRITE(ah, AR_PHY_TIMING11, new);
283
	/* Build pilot/channel masks: flag bins within +/-100 of the spur */
284	cur_bin = -6000;
285	upper = bin + 100;
286	lower = bin - 100;
287
288	for (i = 0; i < 4; i++) {
289		int pilot_mask = 0;
290		int chan_mask = 0;
291		int bp = 0;
292		for (bp = 0; bp < 30; bp++) {
293			if ((cur_bin > lower) && (cur_bin < upper)) {
294				pilot_mask = pilot_mask | 0x1 << bp;
295				chan_mask = chan_mask | 0x1 << bp;
296			}
297			cur_bin += 100;
298		}
299		cur_bin += inc[i];
300		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
301		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
302	}
303
	/* Viterbi masks: flag entries within +/-120 of the spur bin */
304	cur_vit_mask = 6100;
305	upper = bin + 120;
306	lower = bin - 120;
307
308	for (i = 0; i < 123; i++) {
309		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
310
311			/* workaround for gcc bug #37014 */
312			volatile int tmp_v = abs(cur_vit_mask - bin);
313
314			if (tmp_v < 75)
315				mask_amt = 1;
316			else
317				mask_amt = 0;
318			if (cur_vit_mask < 0)
319				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
320			else
321				mask_p[cur_vit_mask / 100] = mask_amt;
322		}
323		cur_vit_mask -= 100;
324	}
325
	/* Pack mask_m/mask_p into the mask registers, 2 bits per entry */
326	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
327		| (mask_m[48] << 26) | (mask_m[49] << 24)
328		| (mask_m[50] << 22) | (mask_m[51] << 20)
329		| (mask_m[52] << 18) | (mask_m[53] << 16)
330		| (mask_m[54] << 14) | (mask_m[55] << 12)
331		| (mask_m[56] << 10) | (mask_m[57] << 8)
332		| (mask_m[58] << 6) | (mask_m[59] << 4)
333		| (mask_m[60] << 2) | (mask_m[61] << 0);
334	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
335	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
336
	/* NOTE(review): mask_m[48] below breaks the 31..45 sequence and
	 * looks like a typo for mask_m[38] — confirm against the HAL. */
337	tmp_mask = (mask_m[31] << 28)
338		| (mask_m[32] << 26) | (mask_m[33] << 24)
339		| (mask_m[34] << 22) | (mask_m[35] << 20)
340		| (mask_m[36] << 18) | (mask_m[37] << 16)
341		| (mask_m[48] << 14) | (mask_m[39] << 12)
342		| (mask_m[40] << 10) | (mask_m[41] << 8)
343		| (mask_m[42] << 6) | (mask_m[43] << 4)
344		| (mask_m[44] << 2) | (mask_m[45] << 0);
345	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
346	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
347
	/* NOTE(review): repeated indices 16,16,18,18,20,20,22,22,24,24 below
	 * look suspicious for a 16..30 register — confirm against the HAL. */
348	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
349		| (mask_m[18] << 26) | (mask_m[18] << 24)
350		| (mask_m[20] << 22) | (mask_m[20] << 20)
351		| (mask_m[22] << 18) | (mask_m[22] << 16)
352		| (mask_m[24] << 14) | (mask_m[24] << 12)
353		| (mask_m[25] << 10) | (mask_m[26] << 8)
354		| (mask_m[27] << 6) | (mask_m[28] << 4)
355		| (mask_m[29] << 2) | (mask_m[30] << 0);
356	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
357	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
358
359	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
360		| (mask_m[2] << 26) | (mask_m[3] << 24)
361		| (mask_m[4] << 22) | (mask_m[5] << 20)
362		| (mask_m[6] << 18) | (mask_m[7] << 16)
363		| (mask_m[8] << 14) | (mask_m[9] << 12)
364		| (mask_m[10] << 10) | (mask_m[11] << 8)
365		| (mask_m[12] << 6) | (mask_m[13] << 4)
366		| (mask_m[14] << 2) | (mask_m[15] << 0);
367	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
368	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
369
370	tmp_mask = (mask_p[15] << 28)
371		| (mask_p[14] << 26) | (mask_p[13] << 24)
372		| (mask_p[12] << 22) | (mask_p[11] << 20)
373		| (mask_p[10] << 18) | (mask_p[9] << 16)
374		| (mask_p[8] << 14) | (mask_p[7] << 12)
375		| (mask_p[6] << 10) | (mask_p[5] << 8)
376		| (mask_p[4] << 6) | (mask_p[3] << 4)
377		| (mask_p[2] << 2) | (mask_p[1] << 0);
378	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
379	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
380
381	tmp_mask = (mask_p[30] << 28)
382		| (mask_p[29] << 26) | (mask_p[28] << 24)
383		| (mask_p[27] << 22) | (mask_p[26] << 20)
384		| (mask_p[25] << 18) | (mask_p[24] << 16)
385		| (mask_p[23] << 14) | (mask_p[22] << 12)
386		| (mask_p[21] << 10) | (mask_p[20] << 8)
387		| (mask_p[19] << 6) | (mask_p[18] << 4)
388		| (mask_p[17] << 2) | (mask_p[16] << 0);
389	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
390	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
391
392	tmp_mask = (mask_p[45] << 28)
393		| (mask_p[44] << 26) | (mask_p[43] << 24)
394		| (mask_p[42] << 22) | (mask_p[41] << 20)
395		| (mask_p[40] << 18) | (mask_p[39] << 16)
396		| (mask_p[38] << 14) | (mask_p[37] << 12)
397		| (mask_p[36] << 10) | (mask_p[35] << 8)
398		| (mask_p[34] << 6) | (mask_p[33] << 4)
399		| (mask_p[32] << 2) | (mask_p[31] << 0);
400	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
401	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
402
403	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
404		| (mask_p[59] << 26) | (mask_p[58] << 24)
405		| (mask_p[57] << 22) | (mask_p[56] << 20)
406		| (mask_p[55] << 18) | (mask_p[54] << 16)
407		| (mask_p[53] << 14) | (mask_p[52] << 12)
408		| (mask_p[51] << 10) | (mask_p[50] << 8)
409		| (mask_p[49] << 6) | (mask_p[48] << 4)
410		| (mask_p[47] << 2) | (mask_p[46] << 0);
411	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
412	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
413}
414
415/**
416 * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
417 * @ah: atheros hardware structure
418 *
419 * Only required for older devices with external AR2133/AR5133 radios.
420 *
 * Returns 0 on success, -ENOMEM on allocation failure (earlier banks
 * remain allocated and are released by ar5008_hw_rf_free_ext_banks()).
 */
421static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
422{
423#define ATH_ALLOC_BANK(bank, size) do { \
424		bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
425		if (!bank) { \
426			ath_print(common, ATH_DBG_FATAL, \
427				  "Cannot allocate RF banks\n"); \
428			return -ENOMEM; \
429		} \
430	} while (0);
431
432	struct ath_common *common = ath9k_hw_common(ah);
433
434	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
435
436	ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
437	ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
438	ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
439	ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
440	ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
441	ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
442	ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
443	ATH_ALLOC_BANK(ah->addac5416_21,
444		       ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
445	ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
446
447	return 0;
448#undef ATH_ALLOC_BANK
449}
450
451
452/**
453 * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
454 * @ah: atheros hardware structure
455 * For the external AR2133/AR5133 radios banks.
456 */
457static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
458{
	/* kfree(NULL) is a no-op, so unallocated banks are safe to free;
	 * NULLing each pointer guards against double free. */
459#define ATH_FREE_BANK(bank) do { \
460		kfree(bank); \
461		bank = NULL; \
462	} while (0);
463
464	BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
465
466	ATH_FREE_BANK(ah->analogBank0Data);
467	ATH_FREE_BANK(ah->analogBank1Data);
468	ATH_FREE_BANK(ah->analogBank2Data);
469	ATH_FREE_BANK(ah->analogBank3Data);
470	ATH_FREE_BANK(ah->analogBank6Data);
471	ATH_FREE_BANK(ah->analogBank6TPCData);
472	ATH_FREE_BANK(ah->analogBank7Data);
473	ATH_FREE_BANK(ah->addac5416_21);
474	ATH_FREE_BANK(ah->bank6Temp);
475
476#undef ATH_FREE_BANK
477}
478
479/**
480 * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
481 * @ah: atheros hardware structure
482 * @chan: channel being programmed (selects 2 GHz vs 5 GHz OB/DB)
483 * @modesIndex: column index into the mode-dependent ini tables
484 *
485 * Used for the external AR2133/AR5133 radios.
486 *
487 * Reads the EEPROM header info from the device structure and programs
488 * all rf registers. This routine requires access to the analog
489 * rf device. This is not required for single-chip devices.
490 */
491static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
492				  struct ath9k_channel *chan,
493				  u16 modesIndex)
494{
495	u32 eepMinorRev;
496	u32 ob5GHz = 0, db5GHz = 0;
497	u32 ob2GHz = 0, db2GHz = 0;
498	int regWrites = 0;
499
500	/*
501	 * Software does not need to program bank data
502	 * for single chip devices, that is AR9280 or anything
503	 * after that.
504	 */
505	if (AR_SREV_9280_10_OR_LATER(ah))
506		return true;
507
508	/* Setup rf parameters */
509	eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
510
511	/* Setup Bank 0 Write */
512	RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
513
514	/* Setup Bank 1 Write */
515	RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
516
517	/* Setup Bank 2 Write */
518	RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
519
520	/* Setup Bank 3 Write */
521	RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
522		      modesIndex);
	/* Bank 6 data is sourced from the TPC variant of the ini table;
	 * NOTE(review): iterating iniBank6TPC rows into analogBank6Data —
	 * presumably the two tables have identical row counts, confirm. */
523	{
524		int i;
525		for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
526			ah->analogBank6Data[i] =
527			    INI_RA(&ah->iniBank6TPC, i, modesIndex);
528		}
529	}
530
531	/* Only the 5 or 2 GHz OB/DB need to be set for a mode */
532	if (eepMinorRev >= 2) {
533		if (IS_CHAN_2GHZ(chan)) {
534			ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
535			db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
536			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
537						       ob2GHz, 3, 197, 0);
538			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
539						       db2GHz, 3, 194, 0);
540		} else {
541			ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
542			db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
543			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
544						       ob5GHz, 3, 203, 0);
545			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
546						       db5GHz, 3, 200, 0);
547		}
548	}
549
550	/* Setup Bank 7 Setup */
551	RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
552
553	/* Write Analog registers */
554	REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
555			   regWrites);
556	REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
557			   regWrites);
558	REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
559			   regWrites);
560	REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
561			   regWrites);
562	REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
563			   regWrites);
564	REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
565			   regWrites);
566
567	return true;
568}
569
/* Activate the PHY and wait long enough for the baseband to settle. */
570static void ar5008_hw_init_bb(struct ath_hw *ah,
571			      struct ath9k_channel *chan)
572{
573	u32 synthDelay;
574
575	synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
	/* 11b channels use a different delay scaling than OFDM channels */
576	if (IS_CHAN_B(chan))
577		synthDelay = (4 * synthDelay) / 22;
578	else
579		synthDelay /= 10;
580
581	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
582
583	udelay(synthDelay + BASE_ACTIVATE_DELAY);
584}
585
/* Program RX/calibration/TX chain masks from the configured chainmasks. */
586static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
587{
588	int rx_chainmask, tx_chainmask;
589
590	rx_chainmask = ah->rxchainmask;
591	tx_chainmask = ah->txchainmask;
592
593	ENABLE_REGWRITE_BUFFER(ah);
594
595	switch (rx_chainmask) {
596	case 0x5:
597		DISABLE_REGWRITE_BUFFER(ah);
598		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
599			    AR_PHY_SWAP_ALT_CHAIN);
600		ENABLE_REGWRITE_BUFFER(ah);
		/* fall through */
601	case 0x3:
602		if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
603			REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
604			REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
605			break;
606		}
		/* else fall through */
607	case 0x1:
608	case 0x2:
609	case 0x7:
610		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
611		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
612		break;
613	default:
614		break;
615	}
616
617	REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
618
619	REGWRITE_BUFFER_FLUSH(ah);
620	DISABLE_REGWRITE_BUFFER(ah);
621
622	if (tx_chainmask == 0x5) {
623		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
624			    AR_PHY_SWAP_ALT_CHAIN);
625	}
626	if (AR_SREV_9100(ah))
627		REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
628			  REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
629}
630
/* Apply chip-specific overrides on top of the ini register defaults. */
631static void ar5008_hw_override_ini(struct ath_hw *ah,
632				   struct ath9k_channel *chan)
633{
634	u32 val;
635
636	/*
637	 * Set the RX_ABORT and RX_DIS and clear them only after
638	 * RXE is set for MAC. This prevents frames with corrupted
639	 * descriptor status.
640	 */
641	REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
642
643	if (AR_SREV_9280_10_OR_LATER(ah)) {
644		val = REG_READ(ah, AR_PCU_MISC_MODE2);
645
646		if (!AR_SREV_9271(ah))
647			val &= ~AR_PCU_MISC_MODE2_HWWAR1;
648
649		if (AR_SREV_9287_10_OR_LATER(ah))
650			val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
651
652		REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
653	}
654
	/* Remaining overrides apply only to AR5416 2.0 era hardware */
655	if (!AR_SREV_5416_20_OR_LATER(ah) ||
656	    AR_SREV_9280_10_OR_LATER(ah))
657		return;
658	/*
659	 * Disable BB clock gating
660	 * Necessary to avoid issues on AR5416 2.0
661	 */
662	REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
663
664	/*
665	 * Disable RIFS search on some chips to avoid baseband
666	 * hang issues.
667	 */
668	if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
669		val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
670		val &= ~AR_PHY_RIFS_INIT_DELAY;
671		REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
672	}
673}
674
/* Program the PHY turbo/HT mode bits and MAC timeouts for @chan. */
675static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
676				       struct ath9k_channel *chan)
677{
678	u32 phymode;
679	u32 enableDacFifo = 0;
680
	/* Preserve the DAC FIFO enable bit on AR9285 and later */
681	if (AR_SREV_9285_10_OR_LATER(ah))
682		enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
683				 AR_PHY_FC_ENABLE_DAC_FIFO);
684
685	phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
686		| AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
687
688	if (IS_CHAN_HT40(chan)) {
689		phymode |= AR_PHY_FC_DYN2040_EN;
690
691		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
692		    (chan->chanmode == CHANNEL_G_HT40PLUS))
693			phymode |= AR_PHY_FC_DYN2040_PRI_CH;
694
695	}
696	REG_WRITE(ah, AR_PHY_TURBO, phymode);
697
698	ath9k_hw_set11nmac2040(ah);
699
700	ENABLE_REGWRITE_BUFFER(ah);
701
702	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
703	REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
704
705	REGWRITE_BUFFER_FLUSH(ah);
706	DISABLE_REGWRITE_BUFFER(ah);
707}
708
709
/*
 * Program all ini register tables (ADDAC, mode, common, RF gain) for the
 * requested channel, then apply overrides, channel regs, chain masks and
 * TX power.  Returns 0 on success, -EINVAL for an unknown channel mode,
 * -EIO if the analog RF register programming fails.
 */
710static int ar5008_hw_process_ini(struct ath_hw *ah,
711				 struct ath9k_channel *chan)
712{
713	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
714	int i, regWrites = 0;
715	struct ieee80211_channel *channel = chan->chan;
716	u32 modesIndex, freqIndex;
717
	/* Map channel mode to ini table column indices */
718	switch (chan->chanmode) {
719	case CHANNEL_A:
720	case CHANNEL_A_HT20:
721		modesIndex = 1;
722		freqIndex = 1;
723		break;
724	case CHANNEL_A_HT40PLUS:
725	case CHANNEL_A_HT40MINUS:
726		modesIndex = 2;
727		freqIndex = 1;
728		break;
729	case CHANNEL_G:
730	case CHANNEL_G_HT20:
731	case CHANNEL_B:
732		modesIndex = 4;
733		freqIndex = 2;
734		break;
735	case CHANNEL_G_HT40PLUS:
736	case CHANNEL_G_HT40MINUS:
737		modesIndex = 3;
738		freqIndex = 2;
739		break;
740
741	default:
742		return -EINVAL;
743	}
744
745	if (AR_SREV_9287_12_OR_LATER(ah)) {
746		/* Enable ASYNC FIFO */
747		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
748			    AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
749		REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
750		REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
751			    AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
752		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
753			    AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
754	}
755
756	/*
757	 * Set correct baseband to analog shift setting to
758	 * access analog chips.
759	 */
760	REG_WRITE(ah, AR_PHY(0), 0x00000007);
761
762	/* Write ADDAC shifts */
763	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
764	ah->eep_ops->set_addac(ah, chan);
765
766	if (AR_SREV_5416_22_OR_LATER(ah)) {
767		REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
768	} else {
769		struct ar5416IniArray temp;
770		u32 addacSize =
771			sizeof(u32) * ah->iniAddac.ia_rows *
772			ah->iniAddac.ia_columns;
773
774		/* For AR5416 2.0/2.1 */
775		memcpy(ah->addac5416_21,
776		       ah->iniAddac.ia_array, addacSize);
777
778		/* override CLKDRV value at [row, column] = [31, 1] */
779		(ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
780
781		temp.ia_array = ah->addac5416_21;
782		temp.ia_columns = ah->iniAddac.ia_columns;
783		temp.ia_rows = ah->iniAddac.ia_rows;
784		REG_WRITE_ARRAY(&temp, 1, regWrites);
785	}
786
787	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
788
789	ENABLE_REGWRITE_BUFFER(ah);
790
	/* Write mode-dependent register array */
791	for (i = 0; i < ah->iniModes.ia_rows; i++) {
792		u32 reg = INI_RA(&ah->iniModes, i, 0);
793		u32 val = INI_RA(&ah->iniModes, i, modesIndex);
794
795		if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
796			val &= ~AR_AN_TOP2_PWDCLKIND;
797
798		REG_WRITE(ah, reg, val);
799
		/* Analog shift registers (0x7800-0x789c) need settle time */
800		if (reg >= 0x7800 && reg < 0x78a0
801		    && ah->config.analog_shiftreg) {
802			udelay(100);
803		}
804
805		DO_DELAY(regWrites);
806	}
807
808	REGWRITE_BUFFER_FLUSH(ah);
809	DISABLE_REGWRITE_BUFFER(ah);
810
811	if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
812		REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
813
814	if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
815	    AR_SREV_9287_10_OR_LATER(ah))
816		REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
817
818	if (AR_SREV_9271_10(ah))
819		REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
820				modesIndex, regWrites);
821
822	ENABLE_REGWRITE_BUFFER(ah);
823
824	/* Write common array parameters */
825	for (i = 0; i < ah->iniCommon.ia_rows; i++) {
826		u32 reg = INI_RA(&ah->iniCommon, i, 0);
827		u32 val = INI_RA(&ah->iniCommon, i, 1);
828
829		REG_WRITE(ah, reg, val);
830
831		if (reg >= 0x7800 && reg < 0x78a0
832		    && ah->config.analog_shiftreg) {
833			udelay(100);
834		}
835
836		DO_DELAY(regWrites);
837	}
838
839	REGWRITE_BUFFER_FLUSH(ah);
840	DISABLE_REGWRITE_BUFFER(ah);
841
842	if (AR_SREV_9271(ah)) {
843		if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
844			REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
845					modesIndex, regWrites);
846		else
847			REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
848					modesIndex, regWrites);
849	}
850
851	REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
852
853	if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
854		REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
855				regWrites);
856	}
857
858	ar5008_hw_override_ini(ah, chan);
859	ar5008_hw_set_channel_regs(ah, chan);
860	ar5008_hw_init_chain_masks(ah);
861	ath9k_olc_init(ah);
862
863	/* Set TX power */
864	ah->eep_ops->set_txpower(ah, chan,
865				 ath9k_regd_get_ctl(regulatory, chan),
866				 channel->max_antenna_gain * 2,
867				 channel->max_power * 2,
868				 min((u32) MAX_RATE_POWER,
869				     (u32) regulatory->power_limit));
870
871	/* Write analog registers */
872	if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
873		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
874			  "ar5416SetRfRegs failed\n");
875		return -EIO;
876	}
877
878	return 0;
879}
880
/* Program AR_PHY_MODE (OFDM/dynamic, band select, fast clock) for @chan. */
881static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
882{
883	u32 rfMode = 0;
884
885	if (chan == NULL)
886		return;
887
888	rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
889		? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
890
	/* Band-select bits exist only on pre-AR9280 hardware */
891	if (!AR_SREV_9280_10_OR_LATER(ah))
892		rfMode |= (IS_CHAN_5GHZ(chan)) ?
893			AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
894
895	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
896		rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
897
898	REG_WRITE(ah, AR_PHY_MODE, rfMode);
899}
900
/* Disable the PHY (baseband inactive). */
901static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
902{
903	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
904}
905
/*
 * Program the OFDM delta-slope coefficients (mantissa/exponent pairs for
 * both normal and half-GI timing) derived from the synth center frequency.
 */
906static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
907				      struct ath9k_channel *chan)
908{
909	u32 coef_scaled, ds_coef_exp, ds_coef_man;
	/* 100 MHz clock scaled by 2^24; halved/quartered for sub-rates */
910	u32 clockMhzScaled = 0x64000000;
911	struct chan_centers centers;
912
913	if (IS_CHAN_HALF_RATE(chan))
914		clockMhzScaled = clockMhzScaled >> 1;
915	else if (IS_CHAN_QUARTER_RATE(chan))
916		clockMhzScaled = clockMhzScaled >> 2;
917
918	ath9k_hw_get_channel_centers(ah, chan, &centers);
919	coef_scaled = clockMhzScaled / centers.synth_center;
920
921	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
922				      &ds_coef_exp);
923
924	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
925		      AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
926	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
927		      AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
928
	/* Half-GI coefficients use 9/10 of the scaled value */
929	coef_scaled = (9 * coef_scaled) / 10;
930
931	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
932				      &ds_coef_exp);
933
934	REG_RMW_FIELD(ah, AR_PHY_HALFGI,
935		      AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
936	REG_RMW_FIELD(ah, AR_PHY_HALFGI,
937		      AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
938}
939
/* Request the RF bus; returns true once the grant bit is observed. */
940static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
941{
942	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
943	return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
944			     AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
945}
946
/* Wait for the synth to settle on the current channel, then release the bus. */
947static void ar5008_hw_rfbus_done(struct ath_hw *ah)
948{
949	u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
	/* Same 11b vs OFDM delay scaling as ar5008_hw_init_bb() */
950	if (IS_CHAN_B(ah->curchan))
951		synthDelay = (4 * synthDelay) / 22;
952	else
953		synthDelay /= 10;
954
955	udelay(synthDelay + BASE_ACTIVATE_DELAY);
956
957	REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
958}
959
/* Route the rfkill GPIO to the baseband's RF-silent input and enable it. */
960static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
961{
962	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
963		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
964
965	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
966		    AR_GPIO_INPUT_MUX2_RFSILENT);
967
968	ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
969	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
970}
971
/* Re-apply the RX chainmask for the dual-chain configurations (0x5/0x3). */
972static void ar5008_restore_chainmask(struct ath_hw *ah)
973{
974	int rx_chainmask = ah->rxchainmask;
975
976	if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
977		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
978		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
979	}
980}
981
/* Enable or disable fast antenna diversity in the CCK detector. */
982static void ar5008_set_diversity(struct ath_hw *ah, bool value)
983{
984	u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
985	if (value)
986		v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
987	else
988		v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
989	REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
990}
991
/* AR9100: fixed PLL control word, selected by band. */
992static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
993					 struct ath9k_channel *chan)
994{
995	if (chan && IS_CHAN_5GHZ(chan))
996		return 0x1450;
997	return 0x1458;
998}
999
/* AR9160: build PLL control word from refdiv, clksel (rate) and divider. */
1000static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
1001					 struct ath9k_channel *chan)
1002{
1003	u32 pll;
1004
1005	pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1006
1007	if (chan && IS_CHAN_HALF_RATE(chan))
1008		pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1009	else if (chan && IS_CHAN_QUARTER_RATE(chan))
1010		pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1011
1012	if (chan && IS_CHAN_5GHZ(chan))
1013		pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1014	else
1015		pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1016
1017	return pll;
1018}
1019
/* AR5008/AR5416: build PLL control word from refdiv, clksel and divider. */
1020static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
1021					 struct ath9k_channel *chan)
1022{
1023	u32 pll;
1024
1025	pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1026
1027	if (chan && IS_CHAN_HALF_RATE(chan))
1028		pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1029	else if (chan && IS_CHAN_QUARTER_RATE(chan))
1030		pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1031
1032	if (chan && IS_CHAN_5GHZ(chan))
1033		pll |= SM(0xa, AR_RTC_PLL_DIV);
1034	else
1035		pll |= SM(0xb, AR_RTC_PLL_DIV);
1036
1037	return pll;
1038}
1039
/*
 * Apply one ANI (Adaptive Noise Immunity) command to the baseband.
 *
 * @ah:    hardware state; ah->curani holds the per-channel ANI state
 * @cmd:   which ANI knob to adjust; it is masked against ah->ani_function,
 *         so a knob that is disabled for this chip falls through to the
 *         default case and is rejected
 * @param: knob-specific argument (a level index, or an on/off flag)
 *
 * Returns true on success, false for an out-of-range level or an
 * unsupported command.  On success the relevant PHY register fields are
 * rewritten, the ANI statistics counters are bumped, and the new value
 * is mirrored into *aniState.
 */
static bool ar5008_hw_ani_control(struct ath_hw *ah,
				  enum ath9k_ani_cmd cmd, int param)
{
	struct ar5416AniState *aniState = ah->curani;
	struct ath_common *common = ath9k_hw_common(ah);

	switch (cmd & ah->ani_function) {
	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
		/* param is an index into the per-level calibration tables
		 * stored on ah (totalSizeDesired/coarse_low/coarse_high/
		 * firpwr). */
		u32 level = param;

		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
			return false;
		}

		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_TOT_DES,
			      ah->totalSizeDesired[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
			      AR_PHY_AGC_CTL1_COARSE_LOW,
			      ah->coarse_low[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
			      AR_PHY_AGC_CTL1_COARSE_HIGH,
			      ah->coarse_high[level]);
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRPWR,
			      ah->firpwr[level]);

		/* Track whether the level moved up or down for statistics. */
		if (level > aniState->noiseImmunityLevel)
			ah->stats.ast_ani_niup++;
		else if (level < aniState->noiseImmunityLevel)
			ah->stats.ast_ani_nidown++;
		aniState->noiseImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
		/* Each pair is { detection-off value, detection-on value };
		 * indexed by 'on' below. */
		const int m1ThreshLow[] = { 127, 50 };
		const int m2ThreshLow[] = { 127, 40 };
		const int m1Thresh[] = { 127, 0x4d };
		const int m2Thresh[] = { 127, 0x40 };
		const int m2CountThr[] = { 31, 16 };
		const int m2CountThrLow[] = { 63, 48 };
		u32 on = param ? 1 : 0;

		/* Primary-channel self-correlator thresholds. */
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M1_THRESH,
			      m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2_THRESH,
			      m2Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2COUNT_THR,
			      m2CountThr[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
			      m2CountThrLow[on]);

		/* Mirror the same thresholds into the extension-channel
		 * correlator registers. */
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH,
			      m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH,
			      m2Thresh[on]);

		if (on)
			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
		else
			REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

		/* State stores the inverted flag ("DetectOff"), hence the
		 * !on comparisons. */
		if (!on != aniState->ofdmWeakSigDetectOff) {
			if (on)
				ah->stats.ast_ani_ofdmon++;
			else
				ah->stats.ast_ani_ofdmoff++;
			aniState->ofdmWeakSigDetectOff = !on;
		}
		break;
	}
	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
		/* { normal threshold, raised threshold }, indexed by 'high'. */
		const int weakSigThrCck[] = { 8, 6 };
		u32 high = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
			      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
			      weakSigThrCck[high]);
		if (high != aniState->cckWeakSigThreshold) {
			if (high)
				ah->stats.ast_ani_cckhigh++;
			else
				ah->stats.ast_ani_ccklow++;
			aniState->cckWeakSigThreshold = high;
		}
		break;
	}
	case ATH9K_ANI_FIRSTEP_LEVEL:{
		/* FIRSTEP values per level index. */
		const int firstep[] = { 0, 4, 8 };
		u32 level = param;

		if (level >= ARRAY_SIZE(firstep)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(firstep));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRSTEP,
			      firstep[level]);
		if (level > aniState->firstepLevel)
			ah->stats.ast_ani_stepup++;
		else if (level < aniState->firstepLevel)
			ah->stats.ast_ani_stepdown++;
		aniState->firstepLevel = level;
		break;
	}
	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
		/* Cycle-power threshold per spur-immunity level index. */
		const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
		u32 level = param;

		if (level >= ARRAY_SIZE(cycpwrThr1)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(cycpwrThr1));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
			      AR_PHY_TIMING5_CYCPWR_THR1,
			      cycpwrThr1[level]);
		if (level > aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurup++;
		else if (level < aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurdown++;
		aniState->spurImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_PRESENT:
		/* Capability probe only — nothing to program. */
		break;
	default:
		ath_print(common, ATH_DBG_ANI,
			  "invalid cmd %u\n", cmd);
		return false;
	}

	/* Dump the full ANI state after every successful change. */
	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
	ath_print(common, ATH_DBG_ANI,
		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
		  "ofdmWeakSigDetectOff=%d\n",
		  aniState->noiseImmunityLevel,
		  aniState->spurImmunityLevel,
		  !aniState->ofdmWeakSigDetectOff);
	ath_print(common, ATH_DBG_ANI,
		  "cckWeakSigThreshold=%d, "
		  "firstepLevel=%d, listenTime=%d\n",
		  aniState->cckWeakSigThreshold,
		  aniState->firstepLevel,
		  aniState->listenTime);
	ath_print(common, ATH_DBG_ANI,
		  "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
		  aniState->cycleCount,
		  aniState->ofdmPhyErrCount,
		  aniState->cckPhyErrCount);

	return true;
}
1222
1223static void ar5008_hw_do_getnf(struct ath_hw *ah,
1224 int16_t nfarray[NUM_NF_READINGS])
1225{
1226 struct ath_common *common = ath9k_hw_common(ah);
1227 int16_t nf;
1228
1229 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1230 if (nf & 0x100)
1231 nf = 0 - ((nf ^ 0x1ff) + 1);
1232 ath_print(common, ATH_DBG_CALIBRATE,
1233 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1234 nfarray[0] = nf;
1235
1236 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
1237 if (nf & 0x100)
1238 nf = 0 - ((nf ^ 0x1ff) + 1);
1239 ath_print(common, ATH_DBG_CALIBRATE,
1240 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1241 nfarray[1] = nf;
1242
1243 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
1244 if (nf & 0x100)
1245 nf = 0 - ((nf ^ 0x1ff) + 1);
1246 ath_print(common, ATH_DBG_CALIBRATE,
1247 "NF calibrated [ctl] [chain 2] is %d\n", nf);
1248 nfarray[2] = nf;
1249
1250 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1251 if (nf & 0x100)
1252 nf = 0 - ((nf ^ 0x1ff) + 1);
1253 ath_print(common, ATH_DBG_CALIBRATE,
1254 "NF calibrated [ext] [chain 0] is %d\n", nf);
1255 nfarray[3] = nf;
1256
1257 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
1258 if (nf & 0x100)
1259 nf = 0 - ((nf ^ 0x1ff) + 1);
1260 ath_print(common, ATH_DBG_CALIBRATE,
1261 "NF calibrated [ext] [chain 1] is %d\n", nf);
1262 nfarray[4] = nf;
1263
1264 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
1265 if (nf & 0x100)
1266 nf = 0 - ((nf ^ 0x1ff) + 1);
1267 ath_print(common, ATH_DBG_CALIBRATE,
1268 "NF calibrated [ext] [chain 2] is %d\n", nf);
1269 nfarray[5] = nf;
1270}
1271
1272static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
1273{
1274 struct ath9k_nfcal_hist *h;
1275 int i, j;
1276 int32_t val;
1277 const u32 ar5416_cca_regs[6] = {
1278 AR_PHY_CCA,
1279 AR_PHY_CH1_CCA,
1280 AR_PHY_CH2_CCA,
1281 AR_PHY_EXT_CCA,
1282 AR_PHY_CH1_EXT_CCA,
1283 AR_PHY_CH2_EXT_CCA
1284 };
1285 u8 chainmask, rx_chain_status;
1286
1287 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
1288 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
1289 chainmask = 0x9;
1290 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
1291 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
1292 chainmask = 0x1B;
1293 else
1294 chainmask = 0x09;
1295 } else {
1296 if (rx_chain_status & 0x4)
1297 chainmask = 0x3F;
1298 else if (rx_chain_status & 0x2)
1299 chainmask = 0x1B;
1300 else
1301 chainmask = 0x09;
1302 }
1303
1304 h = ah->nfCalHist;
1305
1306 for (i = 0; i < NUM_NF_READINGS; i++) {
1307 if (chainmask & (1 << i)) {
1308 val = REG_READ(ah, ar5416_cca_regs[i]);
1309 val &= 0xFFFFFE00;
1310 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
1311 REG_WRITE(ah, ar5416_cca_regs[i], val);
1312 }
1313 }
1314
1315 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1316 AR_PHY_AGC_CONTROL_ENABLE_NF);
1317 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1318 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1319 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1320
1321 for (j = 0; j < 5; j++) {
1322 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
1323 AR_PHY_AGC_CONTROL_NF) == 0)
1324 break;
1325 udelay(50);
1326 }
1327
1328 ENABLE_REGWRITE_BUFFER(ah);
1329
1330 for (i = 0; i < NUM_NF_READINGS; i++) {
1331 if (chainmask & (1 << i)) {
1332 val = REG_READ(ah, ar5416_cca_regs[i]);
1333 val &= 0xFFFFFE00;
1334 val |= (((u32) (-50) << 1) & 0x1ff);
1335 REG_WRITE(ah, ar5416_cca_regs[i], val);
1336 }
1337 }
1338
1339 REGWRITE_BUFFER_FLUSH(ah);
1340 DISABLE_REGWRITE_BUFFER(ah);
1341}
1342
1343void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1344{
1345 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1346
1347 priv_ops->rf_set_freq = ar5008_hw_set_channel;
1348 priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
1349
1350 priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
1351 priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
1352 priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
1353 priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
1354 priv_ops->init_bb = ar5008_hw_init_bb;
1355 priv_ops->process_ini = ar5008_hw_process_ini;
1356 priv_ops->set_rfmode = ar5008_hw_set_rfmode;
1357 priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
1358 priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
1359 priv_ops->rfbus_req = ar5008_hw_rfbus_req;
1360 priv_ops->rfbus_done = ar5008_hw_rfbus_done;
1361 priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
1362 priv_ops->restore_chainmask = ar5008_restore_chainmask;
1363 priv_ops->set_diversity = ar5008_set_diversity;
1364 priv_ops->ani_control = ar5008_hw_ani_control;
1365 priv_ops->do_getnf = ar5008_hw_do_getnf;
1366 priv_ops->loadnf = ar5008_hw_loadnf;
1367
1368 if (AR_SREV_9100(ah))
1369 priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
1370 else if (AR_SREV_9160_10_OR_LATER(ah))
1371 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
1372 else
1373 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
1374}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 000000000000..0b94bd385b0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
1
/*
 * Mode-independent initial register values for AR9100 chips:
 * { register address, value } pairs written during hardware init.
 * Vendor-generated hardware data — do not hand-edit.
 */
static const u32 ar5416Common_9100[][2] = {
	{ 0x0000000c, 0x00000000 },
	{ 0x00000030, 0x00020015 },
	{ 0x00000034, 0x00000005 },
	{ 0x00000040, 0x00000000 },
	{ 0x00000044, 0x00000008 },
	{ 0x00000048, 0x00000008 },
	{ 0x0000004c, 0x00000010 },
	{ 0x00000050, 0x00000000 },
	{ 0x00000054, 0x0000001f },
	{ 0x00000800, 0x00000000 },
	{ 0x00000804, 0x00000000 },
	{ 0x00000808, 0x00000000 },
	{ 0x0000080c, 0x00000000 },
	{ 0x00000810, 0x00000000 },
	{ 0x00000814, 0x00000000 },
	{ 0x00000818, 0x00000000 },
	{ 0x0000081c, 0x00000000 },
	{ 0x00000820, 0x00000000 },
	{ 0x00000824, 0x00000000 },
	{ 0x00001040, 0x002ffc0f },
	{ 0x00001044, 0x002ffc0f },
	{ 0x00001048, 0x002ffc0f },
	{ 0x0000104c, 0x002ffc0f },
	{ 0x00001050, 0x002ffc0f },
	{ 0x00001054, 0x002ffc0f },
	{ 0x00001058, 0x002ffc0f },
	{ 0x0000105c, 0x002ffc0f },
	{ 0x00001060, 0x002ffc0f },
	{ 0x00001064, 0x002ffc0f },
	{ 0x00001230, 0x00000000 },
	{ 0x00001270, 0x00000000 },
	{ 0x00001038, 0x00000000 },
	{ 0x00001078, 0x00000000 },
	{ 0x000010b8, 0x00000000 },
	{ 0x000010f8, 0x00000000 },
	{ 0x00001138, 0x00000000 },
	{ 0x00001178, 0x00000000 },
	{ 0x000011b8, 0x00000000 },
	{ 0x000011f8, 0x00000000 },
	{ 0x00001238, 0x00000000 },
	{ 0x00001278, 0x00000000 },
	{ 0x000012b8, 0x00000000 },
	{ 0x000012f8, 0x00000000 },
	{ 0x00001338, 0x00000000 },
	{ 0x00001378, 0x00000000 },
	{ 0x000013b8, 0x00000000 },
	{ 0x000013f8, 0x00000000 },
	{ 0x00001438, 0x00000000 },
	{ 0x00001478, 0x00000000 },
	{ 0x000014b8, 0x00000000 },
	{ 0x000014f8, 0x00000000 },
	{ 0x00001538, 0x00000000 },
	{ 0x00001578, 0x00000000 },
	{ 0x000015b8, 0x00000000 },
	{ 0x000015f8, 0x00000000 },
	{ 0x00001638, 0x00000000 },
	{ 0x00001678, 0x00000000 },
	{ 0x000016b8, 0x00000000 },
	{ 0x000016f8, 0x00000000 },
	{ 0x00001738, 0x00000000 },
	{ 0x00001778, 0x00000000 },
	{ 0x000017b8, 0x00000000 },
	{ 0x000017f8, 0x00000000 },
	{ 0x0000103c, 0x00000000 },
	{ 0x0000107c, 0x00000000 },
	{ 0x000010bc, 0x00000000 },
	{ 0x000010fc, 0x00000000 },
	{ 0x0000113c, 0x00000000 },
	{ 0x0000117c, 0x00000000 },
	{ 0x000011bc, 0x00000000 },
	{ 0x000011fc, 0x00000000 },
	{ 0x0000123c, 0x00000000 },
	{ 0x0000127c, 0x00000000 },
	{ 0x000012bc, 0x00000000 },
	{ 0x000012fc, 0x00000000 },
	{ 0x0000133c, 0x00000000 },
	{ 0x0000137c, 0x00000000 },
	{ 0x000013bc, 0x00000000 },
	{ 0x000013fc, 0x00000000 },
	{ 0x0000143c, 0x00000000 },
	{ 0x0000147c, 0x00000000 },
	{ 0x00020010, 0x00000003 },
	{ 0x00020038, 0x000004c2 },
	{ 0x00008004, 0x00000000 },
	{ 0x00008008, 0x00000000 },
	{ 0x0000800c, 0x00000000 },
	{ 0x00008018, 0x00000700 },
	{ 0x00008020, 0x00000000 },
	{ 0x00008038, 0x00000000 },
	{ 0x0000803c, 0x00000000 },
	{ 0x00008048, 0x40000000 },
	{ 0x00008054, 0x00004000 },
	{ 0x00008058, 0x00000000 },
	{ 0x0000805c, 0x000fc78f },
	{ 0x00008060, 0x0000000f },
	{ 0x00008064, 0x00000000 },
	{ 0x000080c0, 0x2a82301a },
	{ 0x000080c4, 0x05dc01e0 },
	{ 0x000080c8, 0x1f402710 },
	{ 0x000080cc, 0x01f40000 },
	{ 0x000080d0, 0x00001e00 },
	{ 0x000080d4, 0x00000000 },
	{ 0x000080d8, 0x00400000 },
	{ 0x000080e0, 0xffffffff },
	{ 0x000080e4, 0x0000ffff },
	{ 0x000080e8, 0x003f3f3f },
	{ 0x000080ec, 0x00000000 },
	{ 0x000080f0, 0x00000000 },
	{ 0x000080f4, 0x00000000 },
	{ 0x000080f8, 0x00000000 },
	{ 0x000080fc, 0x00020000 },
	{ 0x00008100, 0x00020000 },
	{ 0x00008104, 0x00000001 },
	{ 0x00008108, 0x00000052 },
	{ 0x0000810c, 0x00000000 },
	{ 0x00008110, 0x00000168 },
	{ 0x00008118, 0x000100aa },
	{ 0x0000811c, 0x00003210 },
	{ 0x00008120, 0x08f04800 },
	{ 0x00008124, 0x00000000 },
	{ 0x00008128, 0x00000000 },
	{ 0x0000812c, 0x00000000 },
	{ 0x00008130, 0x00000000 },
	{ 0x00008134, 0x00000000 },
	{ 0x00008138, 0x00000000 },
	{ 0x0000813c, 0x00000000 },
	{ 0x00008144, 0x00000000 },
	{ 0x00008168, 0x00000000 },
	{ 0x0000816c, 0x00000000 },
	{ 0x00008170, 0x32143320 },
	{ 0x00008174, 0xfaa4fa50 },
	{ 0x00008178, 0x00000100 },
	{ 0x0000817c, 0x00000000 },
	{ 0x000081c4, 0x00000000 },
	{ 0x000081d0, 0x00003210 },
	{ 0x000081ec, 0x00000000 },
	{ 0x000081f0, 0x00000000 },
	{ 0x000081f4, 0x00000000 },
	{ 0x000081f8, 0x00000000 },
	{ 0x000081fc, 0x00000000 },
	{ 0x00008200, 0x00000000 },
	{ 0x00008204, 0x00000000 },
	{ 0x00008208, 0x00000000 },
	{ 0x0000820c, 0x00000000 },
	{ 0x00008210, 0x00000000 },
	{ 0x00008214, 0x00000000 },
	{ 0x00008218, 0x00000000 },
	{ 0x0000821c, 0x00000000 },
	{ 0x00008220, 0x00000000 },
	{ 0x00008224, 0x00000000 },
	{ 0x00008228, 0x00000000 },
	{ 0x0000822c, 0x00000000 },
	{ 0x00008230, 0x00000000 },
	{ 0x00008234, 0x00000000 },
	{ 0x00008238, 0x00000000 },
	{ 0x0000823c, 0x00000000 },
	{ 0x00008240, 0x00100000 },
	{ 0x00008244, 0x0010f400 },
	{ 0x00008248, 0x00000100 },
	{ 0x0000824c, 0x0001e800 },
	{ 0x00008250, 0x00000000 },
	{ 0x00008254, 0x00000000 },
	{ 0x00008258, 0x00000000 },
	{ 0x0000825c, 0x400000ff },
	{ 0x00008260, 0x00080922 },
	{ 0x00008270, 0x00000000 },
	{ 0x00008274, 0x40000000 },
	{ 0x00008278, 0x003e4180 },
	{ 0x0000827c, 0x00000000 },
	{ 0x00008284, 0x0000002c },
	{ 0x00008288, 0x0000002c },
	{ 0x0000828c, 0x00000000 },
	{ 0x00008294, 0x00000000 },
	{ 0x00008298, 0x00000000 },
	{ 0x00008300, 0x00000000 },
	{ 0x00008304, 0x00000000 },
	{ 0x00008308, 0x00000000 },
	{ 0x0000830c, 0x00000000 },
	{ 0x00008310, 0x00000000 },
	{ 0x00008314, 0x00000000 },
	{ 0x00008318, 0x00000000 },
	{ 0x00008328, 0x00000000 },
	{ 0x0000832c, 0x00000007 },
	{ 0x00008330, 0x00000302 },
	{ 0x00008334, 0x00000e00 },
	{ 0x00008338, 0x00000000 },
	{ 0x0000833c, 0x00000000 },
	{ 0x00008340, 0x000107ff },
	{ 0x00009808, 0x00000000 },
	{ 0x0000980c, 0xad848e19 },
	{ 0x00009810, 0x7d14e000 },
	{ 0x00009814, 0x9c0a9f6b },
	{ 0x0000981c, 0x00000000 },
	{ 0x0000982c, 0x0000a000 },
	{ 0x00009830, 0x00000000 },
	{ 0x0000983c, 0x00200400 },
	{ 0x00009840, 0x206a01ae },
	{ 0x0000984c, 0x1284233c },
	{ 0x00009854, 0x00000859 },
	{ 0x00009900, 0x00000000 },
	{ 0x00009904, 0x00000000 },
	{ 0x00009908, 0x00000000 },
	{ 0x0000990c, 0x00000000 },
	{ 0x0000991c, 0x10000fff },
	{ 0x00009920, 0x05100000 },
	{ 0x0000a920, 0x05100000 },
	{ 0x0000b920, 0x05100000 },
	{ 0x00009928, 0x00000001 },
	{ 0x0000992c, 0x00000004 },
	{ 0x00009934, 0x1e1f2022 },
	{ 0x00009938, 0x0a0b0c0d },
	{ 0x0000993c, 0x00000000 },
	{ 0x00009948, 0x9280b212 },
	{ 0x0000994c, 0x00020028 },
	{ 0x0000c95c, 0x004b6a8e },
	{ 0x0000c968, 0x000003ce },
	{ 0x00009970, 0x190fb515 },
	{ 0x00009974, 0x00000000 },
	{ 0x00009978, 0x00000001 },
	{ 0x0000997c, 0x00000000 },
	{ 0x00009980, 0x00000000 },
	{ 0x00009984, 0x00000000 },
	{ 0x00009988, 0x00000000 },
	{ 0x0000998c, 0x00000000 },
	{ 0x00009990, 0x00000000 },
	{ 0x00009994, 0x00000000 },
	{ 0x00009998, 0x00000000 },
	{ 0x0000999c, 0x00000000 },
	{ 0x000099a0, 0x00000000 },
	{ 0x000099a4, 0x00000001 },
	{ 0x000099a8, 0x201fff00 },
	{ 0x000099ac, 0x006f0000 },
	{ 0x000099b0, 0x03051000 },
	{ 0x000099dc, 0x00000000 },
	{ 0x000099e0, 0x00000200 },
	{ 0x000099e4, 0xaaaaaaaa },
	{ 0x000099e8, 0x3c466478 },
	{ 0x000099ec, 0x0cc80caa },
	{ 0x000099fc, 0x00001042 },
	{ 0x00009b00, 0x00000000 },
	{ 0x00009b04, 0x00000001 },
	{ 0x00009b08, 0x00000002 },
	{ 0x00009b0c, 0x00000003 },
	{ 0x00009b10, 0x00000004 },
	{ 0x00009b14, 0x00000005 },
	{ 0x00009b18, 0x00000008 },
	{ 0x00009b1c, 0x00000009 },
	{ 0x00009b20, 0x0000000a },
	{ 0x00009b24, 0x0000000b },
	{ 0x00009b28, 0x0000000c },
	{ 0x00009b2c, 0x0000000d },
	{ 0x00009b30, 0x00000010 },
	{ 0x00009b34, 0x00000011 },
	{ 0x00009b38, 0x00000012 },
	{ 0x00009b3c, 0x00000013 },
	{ 0x00009b40, 0x00000014 },
	{ 0x00009b44, 0x00000015 },
	{ 0x00009b48, 0x00000018 },
	{ 0x00009b4c, 0x00000019 },
	{ 0x00009b50, 0x0000001a },
	{ 0x00009b54, 0x0000001b },
	{ 0x00009b58, 0x0000001c },
	{ 0x00009b5c, 0x0000001d },
	{ 0x00009b60, 0x00000020 },
	{ 0x00009b64, 0x00000021 },
	{ 0x00009b68, 0x00000022 },
	{ 0x00009b6c, 0x00000023 },
	{ 0x00009b70, 0x00000024 },
	{ 0x00009b74, 0x00000025 },
	{ 0x00009b78, 0x00000028 },
	{ 0x00009b7c, 0x00000029 },
	{ 0x00009b80, 0x0000002a },
	{ 0x00009b84, 0x0000002b },
	{ 0x00009b88, 0x0000002c },
	{ 0x00009b8c, 0x0000002d },
	{ 0x00009b90, 0x00000030 },
	{ 0x00009b94, 0x00000031 },
	{ 0x00009b98, 0x00000032 },
	{ 0x00009b9c, 0x00000033 },
	{ 0x00009ba0, 0x00000034 },
	{ 0x00009ba4, 0x00000035 },
	{ 0x00009ba8, 0x00000035 },
	{ 0x00009bac, 0x00000035 },
	{ 0x00009bb0, 0x00000035 },
	{ 0x00009bb4, 0x00000035 },
	{ 0x00009bb8, 0x00000035 },
	{ 0x00009bbc, 0x00000035 },
	{ 0x00009bc0, 0x00000035 },
	{ 0x00009bc4, 0x00000035 },
	{ 0x00009bc8, 0x00000035 },
	{ 0x00009bcc, 0x00000035 },
	{ 0x00009bd0, 0x00000035 },
	{ 0x00009bd4, 0x00000035 },
	{ 0x00009bd8, 0x00000035 },
	{ 0x00009bdc, 0x00000035 },
	{ 0x00009be0, 0x00000035 },
	{ 0x00009be4, 0x00000035 },
	{ 0x00009be8, 0x00000035 },
	{ 0x00009bec, 0x00000035 },
	{ 0x00009bf0, 0x00000035 },
	{ 0x00009bf4, 0x00000035 },
	{ 0x00009bf8, 0x00000010 },
	{ 0x00009bfc, 0x0000001a },
	{ 0x0000a210, 0x40806333 },
	{ 0x0000a214, 0x00106c10 },
	{ 0x0000a218, 0x009c4060 },
	{ 0x0000a220, 0x018830c6 },
	{ 0x0000a224, 0x00000400 },
	{ 0x0000a228, 0x001a0bb5 },
	{ 0x0000a22c, 0x00000000 },
	{ 0x0000a234, 0x20202020 },
	{ 0x0000a238, 0x20202020 },
	{ 0x0000a23c, 0x13c889ae },
	{ 0x0000a240, 0x38490a20 },
	{ 0x0000a244, 0x00007bb6 },
	{ 0x0000a248, 0x0fff3ffc },
	{ 0x0000a24c, 0x00000001 },
	{ 0x0000a250, 0x0000a000 },
	{ 0x0000a254, 0x00000000 },
	{ 0x0000a258, 0x0cc75380 },
	{ 0x0000a25c, 0x0f0f0f01 },
	{ 0x0000a260, 0xdfa91f01 },
	{ 0x0000a268, 0x00000001 },
	{ 0x0000a26c, 0x0ebae9c6 },
	{ 0x0000b26c, 0x0ebae9c6 },
	{ 0x0000c26c, 0x0ebae9c6 },
	{ 0x0000d270, 0x00820820 },
	{ 0x0000a278, 0x1ce739ce },
	{ 0x0000a27c, 0x050701ce },
	{ 0x0000a338, 0x00000000 },
	{ 0x0000a33c, 0x00000000 },
	{ 0x0000a340, 0x00000000 },
	{ 0x0000a344, 0x00000000 },
	{ 0x0000a348, 0x3fffffff },
	{ 0x0000a34c, 0x3fffffff },
	{ 0x0000a350, 0x3fffffff },
	{ 0x0000a354, 0x0003ffff },
	{ 0x0000a358, 0x79a8aa33 },
	{ 0x0000d35c, 0x07ffffef },
	{ 0x0000d360, 0x0fffffe7 },
	{ 0x0000d364, 0x17ffffe5 },
	{ 0x0000d368, 0x1fffffe4 },
	{ 0x0000d36c, 0x37ffffe3 },
	{ 0x0000d370, 0x3fffffe3 },
	{ 0x0000d374, 0x57ffffe3 },
	{ 0x0000d378, 0x5fffffe2 },
	{ 0x0000d37c, 0x7fffffe2 },
	{ 0x0000d380, 0x7f3c7bba },
	{ 0x0000d384, 0xf3307ff0 },
	{ 0x0000a388, 0x0c000000 },
	{ 0x0000a38c, 0x20202020 },
	{ 0x0000a390, 0x20202020 },
	{ 0x0000a394, 0x1ce739ce },
	{ 0x0000a398, 0x000001ce },
	{ 0x0000a39c, 0x00000001 },
	{ 0x0000a3a0, 0x00000000 },
	{ 0x0000a3a4, 0x00000000 },
	{ 0x0000a3a8, 0x00000000 },
	{ 0x0000a3ac, 0x00000000 },
	{ 0x0000a3b0, 0x00000000 },
	{ 0x0000a3b4, 0x00000000 },
	{ 0x0000a3b8, 0x00000000 },
	{ 0x0000a3bc, 0x00000000 },
	{ 0x0000a3c0, 0x00000000 },
	{ 0x0000a3c4, 0x00000000 },
	{ 0x0000a3c8, 0x00000246 },
	{ 0x0000a3cc, 0x20202020 },
	{ 0x0000a3d0, 0x20202020 },
	{ 0x0000a3d4, 0x20202020 },
	{ 0x0000a3dc, 0x1ce739ce },
	{ 0x0000a3e0, 0x000001ce },
};
375
/* AR9100 RF bank 0 initial values: { address, value }. Vendor data. */
static const u32 ar5416Bank0_9100[][2] = {
	{ 0x000098b0, 0x1e5795e5 },
	{ 0x000098e0, 0x02008020 },
};
380
/*
 * AR9100 baseband RF gain table: { address, value, value }.  The two
 * value columns are selected per operating mode/band by the INI loader
 * (NOTE(review): exact column-to-mode mapping not visible here —
 * confirm against the INI write path).  Vendor data — do not hand-edit.
 */
static const u32 ar5416BB_RfGain_9100[][3] = {
	{ 0x00009a00, 0x00000000, 0x00000000 },
	{ 0x00009a04, 0x00000040, 0x00000040 },
	{ 0x00009a08, 0x00000080, 0x00000080 },
	{ 0x00009a0c, 0x000001a1, 0x00000141 },
	{ 0x00009a10, 0x000001e1, 0x00000181 },
	{ 0x00009a14, 0x00000021, 0x000001c1 },
	{ 0x00009a18, 0x00000061, 0x00000001 },
	{ 0x00009a1c, 0x00000168, 0x00000041 },
	{ 0x00009a20, 0x000001a8, 0x000001a8 },
	{ 0x00009a24, 0x000001e8, 0x000001e8 },
	{ 0x00009a28, 0x00000028, 0x00000028 },
	{ 0x00009a2c, 0x00000068, 0x00000068 },
	{ 0x00009a30, 0x00000189, 0x000000a8 },
	{ 0x00009a34, 0x000001c9, 0x00000169 },
	{ 0x00009a38, 0x00000009, 0x000001a9 },
	{ 0x00009a3c, 0x00000049, 0x000001e9 },
	{ 0x00009a40, 0x00000089, 0x00000029 },
	{ 0x00009a44, 0x00000170, 0x00000069 },
	{ 0x00009a48, 0x000001b0, 0x00000190 },
	{ 0x00009a4c, 0x000001f0, 0x000001d0 },
	{ 0x00009a50, 0x00000030, 0x00000010 },
	{ 0x00009a54, 0x00000070, 0x00000050 },
	{ 0x00009a58, 0x00000191, 0x00000090 },
	{ 0x00009a5c, 0x000001d1, 0x00000151 },
	{ 0x00009a60, 0x00000011, 0x00000191 },
	{ 0x00009a64, 0x00000051, 0x000001d1 },
	{ 0x00009a68, 0x00000091, 0x00000011 },
	{ 0x00009a6c, 0x000001b8, 0x00000051 },
	{ 0x00009a70, 0x000001f8, 0x00000198 },
	{ 0x00009a74, 0x00000038, 0x000001d8 },
	{ 0x00009a78, 0x00000078, 0x00000018 },
	{ 0x00009a7c, 0x00000199, 0x00000058 },
	{ 0x00009a80, 0x000001d9, 0x00000098 },
	{ 0x00009a84, 0x00000019, 0x00000159 },
	{ 0x00009a88, 0x00000059, 0x00000199 },
	{ 0x00009a8c, 0x00000099, 0x000001d9 },
	{ 0x00009a90, 0x000000d9, 0x00000019 },
	{ 0x00009a94, 0x000000f9, 0x00000059 },
	{ 0x00009a98, 0x000000f9, 0x00000099 },
	{ 0x00009a9c, 0x000000f9, 0x000000d9 },
	{ 0x00009aa0, 0x000000f9, 0x000000f9 },
	{ 0x00009aa4, 0x000000f9, 0x000000f9 },
	{ 0x00009aa8, 0x000000f9, 0x000000f9 },
	{ 0x00009aac, 0x000000f9, 0x000000f9 },
	{ 0x00009ab0, 0x000000f9, 0x000000f9 },
	{ 0x00009ab4, 0x000000f9, 0x000000f9 },
	{ 0x00009ab8, 0x000000f9, 0x000000f9 },
	{ 0x00009abc, 0x000000f9, 0x000000f9 },
	{ 0x00009ac0, 0x000000f9, 0x000000f9 },
	{ 0x00009ac4, 0x000000f9, 0x000000f9 },
	{ 0x00009ac8, 0x000000f9, 0x000000f9 },
	{ 0x00009acc, 0x000000f9, 0x000000f9 },
	{ 0x00009ad0, 0x000000f9, 0x000000f9 },
	{ 0x00009ad4, 0x000000f9, 0x000000f9 },
	{ 0x00009ad8, 0x000000f9, 0x000000f9 },
	{ 0x00009adc, 0x000000f9, 0x000000f9 },
	{ 0x00009ae0, 0x000000f9, 0x000000f9 },
	{ 0x00009ae4, 0x000000f9, 0x000000f9 },
	{ 0x00009ae8, 0x000000f9, 0x000000f9 },
	{ 0x00009aec, 0x000000f9, 0x000000f9 },
	{ 0x00009af0, 0x000000f9, 0x000000f9 },
	{ 0x00009af4, 0x000000f9, 0x000000f9 },
	{ 0x00009af8, 0x000000f9, 0x000000f9 },
	{ 0x00009afc, 0x000000f9, 0x000000f9 },
};
447
/* AR9100 RF bank 1 initial values: { address, value }. Vendor data. */
static const u32 ar5416Bank1_9100[][2] = {
	{ 0x000098b0, 0x02108421 },
	{ 0x000098ec, 0x00000008 },
};
452
/* AR9100 RF bank 2 initial values: { address, value }. Vendor data. */
static const u32 ar5416Bank2_9100[][2] = {
	{ 0x000098b0, 0x0e73ff17 },
	{ 0x000098e0, 0x00000420 },
};
457
/* AR9100 RF bank 3: { address, value, value }; column chosen per
 * mode/band by the INI loader. Vendor data. */
static const u32 ar5416Bank3_9100[][3] = {
	{ 0x000098f0, 0x01400018, 0x01c00018 },
};
461
/* AR9100 RF bank 6 (serial shift-in via 0x989c, latched at 0x98d0):
 * { address, value, value }. Vendor data — do not hand-edit. */
static const u32 ar5416Bank6_9100[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x004210a2, 0x004210a2 },
	{ 0x0000989c, 0x0014000f, 0x0014000f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x000180d6, 0x000180d6 },
	{ 0x0000989c, 0x0000c0aa, 0x0000c0aa },
	{ 0x0000989c, 0x000000b1, 0x000000b1 },
	{ 0x0000989c, 0x00002000, 0x00002000 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
498
499
/* AR9100 RF bank 6 variant used when per-packet TPC (transmit power
 * control) is enabled; same layout as ar5416Bank6_9100. Vendor data. */
static const u32 ar5416Bank6TPC_9100[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x00423022, 0x00423022 },
	{ 0x0000989c, 0x2014008f, 0x2014008f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000e1, 0x000000e1 },
	{ 0x0000989c, 0x00007080, 0x00007080 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
536
/* AR9100 RF bank 7 (shift-in via 0x989c, latched at 0x98cc). Vendor data. */
static const u32 ar5416Bank7_9100[][2] = {
	{ 0x0000989c, 0x00000500 },
	{ 0x0000989c, 0x00000800 },
	{ 0x000098cc, 0x0000000e },
};
542
/* AR9100 ADDAC (analog DAC/ADC) programming sequence: values shifted
 * in via 0x989c, latched by the final 0x98cc write. Vendor data. */
static const u32 ar5416Addac_9100[][2] = {
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000010 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x000000c0 },
	{ 0x0000989c, 0x00000015 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x0000989c, 0x00000000 },
	{ 0x000098cc, 0x00000000 },
};
577
/*
 * Mode-dependent initial values for AR9160: { address, then one value
 * per channel-width/band mode }.  Column order follows the driver's
 * INI mode indices (NOTE(review): exact mode-per-column mapping not
 * visible here — confirm against the INI loader).  Vendor data.
 */
static const u32 ar5416Modes_9160[][6] = {
	{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
	{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
	{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
	{ 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
	{ 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
	{ 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
	{ 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
	{ 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
	{ 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
	{ 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
	{ 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
	{ 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
	{ 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
	{ 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
	{ 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
	{ 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
	{ 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
	{ 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
	{ 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
	{ 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
	{ 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
	{ 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
	{ 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
	{ 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
	{ 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
	{ 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
	{ 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
	{ 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
	{ 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
	{ 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
	{ 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
	{ 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
	{ 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
	{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
	{ 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
	{ 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
	{ 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
	{ 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
	{ 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
	{ 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
	{ 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
	{ 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
	{ 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
	{ 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
	{ 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
	{ 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
642
643static const u32 ar5416Common_9160[][2] = {
644 { 0x0000000c, 0x00000000 },
645 { 0x00000030, 0x00020015 },
646 { 0x00000034, 0x00000005 },
647 { 0x00000040, 0x00000000 },
648 { 0x00000044, 0x00000008 },
649 { 0x00000048, 0x00000008 },
650 { 0x0000004c, 0x00000010 },
651 { 0x00000050, 0x00000000 },
652 { 0x00000054, 0x0000001f },
653 { 0x00000800, 0x00000000 },
654 { 0x00000804, 0x00000000 },
655 { 0x00000808, 0x00000000 },
656 { 0x0000080c, 0x00000000 },
657 { 0x00000810, 0x00000000 },
658 { 0x00000814, 0x00000000 },
659 { 0x00000818, 0x00000000 },
660 { 0x0000081c, 0x00000000 },
661 { 0x00000820, 0x00000000 },
662 { 0x00000824, 0x00000000 },
663 { 0x00001040, 0x002ffc0f },
664 { 0x00001044, 0x002ffc0f },
665 { 0x00001048, 0x002ffc0f },
666 { 0x0000104c, 0x002ffc0f },
667 { 0x00001050, 0x002ffc0f },
668 { 0x00001054, 0x002ffc0f },
669 { 0x00001058, 0x002ffc0f },
670 { 0x0000105c, 0x002ffc0f },
671 { 0x00001060, 0x002ffc0f },
672 { 0x00001064, 0x002ffc0f },
673 { 0x00001230, 0x00000000 },
674 { 0x00001270, 0x00000000 },
675 { 0x00001038, 0x00000000 },
676 { 0x00001078, 0x00000000 },
677 { 0x000010b8, 0x00000000 },
678 { 0x000010f8, 0x00000000 },
679 { 0x00001138, 0x00000000 },
680 { 0x00001178, 0x00000000 },
681 { 0x000011b8, 0x00000000 },
682 { 0x000011f8, 0x00000000 },
683 { 0x00001238, 0x00000000 },
684 { 0x00001278, 0x00000000 },
685 { 0x000012b8, 0x00000000 },
686 { 0x000012f8, 0x00000000 },
687 { 0x00001338, 0x00000000 },
688 { 0x00001378, 0x00000000 },
689 { 0x000013b8, 0x00000000 },
690 { 0x000013f8, 0x00000000 },
691 { 0x00001438, 0x00000000 },
692 { 0x00001478, 0x00000000 },
693 { 0x000014b8, 0x00000000 },
694 { 0x000014f8, 0x00000000 },
695 { 0x00001538, 0x00000000 },
696 { 0x00001578, 0x00000000 },
697 { 0x000015b8, 0x00000000 },
698 { 0x000015f8, 0x00000000 },
699 { 0x00001638, 0x00000000 },
700 { 0x00001678, 0x00000000 },
701 { 0x000016b8, 0x00000000 },
702 { 0x000016f8, 0x00000000 },
703 { 0x00001738, 0x00000000 },
704 { 0x00001778, 0x00000000 },
705 { 0x000017b8, 0x00000000 },
706 { 0x000017f8, 0x00000000 },
707 { 0x0000103c, 0x00000000 },
708 { 0x0000107c, 0x00000000 },
709 { 0x000010bc, 0x00000000 },
710 { 0x000010fc, 0x00000000 },
711 { 0x0000113c, 0x00000000 },
712 { 0x0000117c, 0x00000000 },
713 { 0x000011bc, 0x00000000 },
714 { 0x000011fc, 0x00000000 },
715 { 0x0000123c, 0x00000000 },
716 { 0x0000127c, 0x00000000 },
717 { 0x000012bc, 0x00000000 },
718 { 0x000012fc, 0x00000000 },
719 { 0x0000133c, 0x00000000 },
720 { 0x0000137c, 0x00000000 },
721 { 0x000013bc, 0x00000000 },
722 { 0x000013fc, 0x00000000 },
723 { 0x0000143c, 0x00000000 },
724 { 0x0000147c, 0x00000000 },
725 { 0x00004030, 0x00000002 },
726 { 0x0000403c, 0x00000002 },
727 { 0x00007010, 0x00000020 },
728 { 0x00007038, 0x000004c2 },
729 { 0x00008004, 0x00000000 },
730 { 0x00008008, 0x00000000 },
731 { 0x0000800c, 0x00000000 },
732 { 0x00008018, 0x00000700 },
733 { 0x00008020, 0x00000000 },
734 { 0x00008038, 0x00000000 },
735 { 0x0000803c, 0x00000000 },
736 { 0x00008048, 0x40000000 },
737 { 0x00008054, 0x00000000 },
738 { 0x00008058, 0x00000000 },
739 { 0x0000805c, 0x000fc78f },
740 { 0x00008060, 0x0000000f },
741 { 0x00008064, 0x00000000 },
742 { 0x000080c0, 0x2a82301a },
743 { 0x000080c4, 0x05dc01e0 },
744 { 0x000080c8, 0x1f402710 },
745 { 0x000080cc, 0x01f40000 },
746 { 0x000080d0, 0x00001e00 },
747 { 0x000080d4, 0x00000000 },
748 { 0x000080d8, 0x00400000 },
749 { 0x000080e0, 0xffffffff },
750 { 0x000080e4, 0x0000ffff },
751 { 0x000080e8, 0x003f3f3f },
752 { 0x000080ec, 0x00000000 },
753 { 0x000080f0, 0x00000000 },
754 { 0x000080f4, 0x00000000 },
755 { 0x000080f8, 0x00000000 },
756 { 0x000080fc, 0x00020000 },
757 { 0x00008100, 0x00020000 },
758 { 0x00008104, 0x00000001 },
759 { 0x00008108, 0x00000052 },
760 { 0x0000810c, 0x00000000 },
761 { 0x00008110, 0x00000168 },
762 { 0x00008118, 0x000100aa },
763 { 0x0000811c, 0x00003210 },
764 { 0x00008120, 0x08f04800 },
765 { 0x00008124, 0x00000000 },
766 { 0x00008128, 0x00000000 },
767 { 0x0000812c, 0x00000000 },
768 { 0x00008130, 0x00000000 },
769 { 0x00008134, 0x00000000 },
770 { 0x00008138, 0x00000000 },
771 { 0x0000813c, 0x00000000 },
772 { 0x00008144, 0xffffffff },
773 { 0x00008168, 0x00000000 },
774 { 0x0000816c, 0x00000000 },
775 { 0x00008170, 0x32143320 },
776 { 0x00008174, 0xfaa4fa50 },
777 { 0x00008178, 0x00000100 },
778 { 0x0000817c, 0x00000000 },
779 { 0x000081c4, 0x00000000 },
780 { 0x000081d0, 0x00003210 },
781 { 0x000081ec, 0x00000000 },
782 { 0x000081f0, 0x00000000 },
783 { 0x000081f4, 0x00000000 },
784 { 0x000081f8, 0x00000000 },
785 { 0x000081fc, 0x00000000 },
786 { 0x00008200, 0x00000000 },
787 { 0x00008204, 0x00000000 },
788 { 0x00008208, 0x00000000 },
789 { 0x0000820c, 0x00000000 },
790 { 0x00008210, 0x00000000 },
791 { 0x00008214, 0x00000000 },
792 { 0x00008218, 0x00000000 },
793 { 0x0000821c, 0x00000000 },
794 { 0x00008220, 0x00000000 },
795 { 0x00008224, 0x00000000 },
796 { 0x00008228, 0x00000000 },
797 { 0x0000822c, 0x00000000 },
798 { 0x00008230, 0x00000000 },
799 { 0x00008234, 0x00000000 },
800 { 0x00008238, 0x00000000 },
801 { 0x0000823c, 0x00000000 },
802 { 0x00008240, 0x00100000 },
803 { 0x00008244, 0x0010f400 },
804 { 0x00008248, 0x00000100 },
805 { 0x0000824c, 0x0001e800 },
806 { 0x00008250, 0x00000000 },
807 { 0x00008254, 0x00000000 },
808 { 0x00008258, 0x00000000 },
809 { 0x0000825c, 0x400000ff },
810 { 0x00008260, 0x00080922 },
811 { 0x00008270, 0x00000000 },
812 { 0x00008274, 0x40000000 },
813 { 0x00008278, 0x003e4180 },
814 { 0x0000827c, 0x00000000 },
815 { 0x00008284, 0x0000002c },
816 { 0x00008288, 0x0000002c },
817 { 0x0000828c, 0x00000000 },
818 { 0x00008294, 0x00000000 },
819 { 0x00008298, 0x00000000 },
820 { 0x00008300, 0x00000000 },
821 { 0x00008304, 0x00000000 },
822 { 0x00008308, 0x00000000 },
823 { 0x0000830c, 0x00000000 },
824 { 0x00008310, 0x00000000 },
825 { 0x00008314, 0x00000000 },
826 { 0x00008318, 0x00000000 },
827 { 0x00008328, 0x00000000 },
828 { 0x0000832c, 0x00000007 },
829 { 0x00008330, 0x00000302 },
830 { 0x00008334, 0x00000e00 },
831 { 0x00008338, 0x00ff0000 },
832 { 0x0000833c, 0x00000000 },
833 { 0x00008340, 0x000107ff },
834 { 0x00009808, 0x00000000 },
835 { 0x0000980c, 0xad848e19 },
836 { 0x00009810, 0x7d14e000 },
837 { 0x00009814, 0x9c0a9f6b },
838 { 0x0000981c, 0x00000000 },
839 { 0x0000982c, 0x0000a000 },
840 { 0x00009830, 0x00000000 },
841 { 0x0000983c, 0x00200400 },
842 { 0x00009840, 0x206a01ae },
843 { 0x0000984c, 0x1284233c },
844 { 0x00009854, 0x00000859 },
845 { 0x00009900, 0x00000000 },
846 { 0x00009904, 0x00000000 },
847 { 0x00009908, 0x00000000 },
848 { 0x0000990c, 0x00000000 },
849 { 0x0000991c, 0x10000fff },
850 { 0x00009920, 0x05100000 },
851 { 0x0000a920, 0x05100000 },
852 { 0x0000b920, 0x05100000 },
853 { 0x00009928, 0x00000001 },
854 { 0x0000992c, 0x00000004 },
855 { 0x00009934, 0x1e1f2022 },
856 { 0x00009938, 0x0a0b0c0d },
857 { 0x0000993c, 0x00000000 },
858 { 0x00009948, 0x9280b212 },
859 { 0x0000994c, 0x00020028 },
860 { 0x00009954, 0x5f3ca3de },
861 { 0x00009958, 0x2108ecff },
862 { 0x00009940, 0x00750604 },
863 { 0x0000c95c, 0x004b6a8e },
864 { 0x00009970, 0x190fb515 },
865 { 0x00009974, 0x00000000 },
866 { 0x00009978, 0x00000001 },
867 { 0x0000997c, 0x00000000 },
868 { 0x00009980, 0x00000000 },
869 { 0x00009984, 0x00000000 },
870 { 0x00009988, 0x00000000 },
871 { 0x0000998c, 0x00000000 },
872 { 0x00009990, 0x00000000 },
873 { 0x00009994, 0x00000000 },
874 { 0x00009998, 0x00000000 },
875 { 0x0000999c, 0x00000000 },
876 { 0x000099a0, 0x00000000 },
877 { 0x000099a4, 0x00000001 },
878 { 0x000099a8, 0x201fff00 },
879 { 0x000099ac, 0x006f0000 },
880 { 0x000099b0, 0x03051000 },
881 { 0x000099dc, 0x00000000 },
882 { 0x000099e0, 0x00000200 },
883 { 0x000099e4, 0xaaaaaaaa },
884 { 0x000099e8, 0x3c466478 },
885 { 0x000099ec, 0x0cc80caa },
886 { 0x000099fc, 0x00001042 },
887 { 0x00009b00, 0x00000000 },
888 { 0x00009b04, 0x00000001 },
889 { 0x00009b08, 0x00000002 },
890 { 0x00009b0c, 0x00000003 },
891 { 0x00009b10, 0x00000004 },
892 { 0x00009b14, 0x00000005 },
893 { 0x00009b18, 0x00000008 },
894 { 0x00009b1c, 0x00000009 },
895 { 0x00009b20, 0x0000000a },
896 { 0x00009b24, 0x0000000b },
897 { 0x00009b28, 0x0000000c },
898 { 0x00009b2c, 0x0000000d },
899 { 0x00009b30, 0x00000010 },
900 { 0x00009b34, 0x00000011 },
901 { 0x00009b38, 0x00000012 },
902 { 0x00009b3c, 0x00000013 },
903 { 0x00009b40, 0x00000014 },
904 { 0x00009b44, 0x00000015 },
905 { 0x00009b48, 0x00000018 },
906 { 0x00009b4c, 0x00000019 },
907 { 0x00009b50, 0x0000001a },
908 { 0x00009b54, 0x0000001b },
909 { 0x00009b58, 0x0000001c },
910 { 0x00009b5c, 0x0000001d },
911 { 0x00009b60, 0x00000020 },
912 { 0x00009b64, 0x00000021 },
913 { 0x00009b68, 0x00000022 },
914 { 0x00009b6c, 0x00000023 },
915 { 0x00009b70, 0x00000024 },
916 { 0x00009b74, 0x00000025 },
917 { 0x00009b78, 0x00000028 },
918 { 0x00009b7c, 0x00000029 },
919 { 0x00009b80, 0x0000002a },
920 { 0x00009b84, 0x0000002b },
921 { 0x00009b88, 0x0000002c },
922 { 0x00009b8c, 0x0000002d },
923 { 0x00009b90, 0x00000030 },
924 { 0x00009b94, 0x00000031 },
925 { 0x00009b98, 0x00000032 },
926 { 0x00009b9c, 0x00000033 },
927 { 0x00009ba0, 0x00000034 },
928 { 0x00009ba4, 0x00000035 },
929 { 0x00009ba8, 0x00000035 },
930 { 0x00009bac, 0x00000035 },
931 { 0x00009bb0, 0x00000035 },
932 { 0x00009bb4, 0x00000035 },
933 { 0x00009bb8, 0x00000035 },
934 { 0x00009bbc, 0x00000035 },
935 { 0x00009bc0, 0x00000035 },
936 { 0x00009bc4, 0x00000035 },
937 { 0x00009bc8, 0x00000035 },
938 { 0x00009bcc, 0x00000035 },
939 { 0x00009bd0, 0x00000035 },
940 { 0x00009bd4, 0x00000035 },
941 { 0x00009bd8, 0x00000035 },
942 { 0x00009bdc, 0x00000035 },
943 { 0x00009be0, 0x00000035 },
944 { 0x00009be4, 0x00000035 },
945 { 0x00009be8, 0x00000035 },
946 { 0x00009bec, 0x00000035 },
947 { 0x00009bf0, 0x00000035 },
948 { 0x00009bf4, 0x00000035 },
949 { 0x00009bf8, 0x00000010 },
950 { 0x00009bfc, 0x0000001a },
951 { 0x0000a210, 0x40806333 },
952 { 0x0000a214, 0x00106c10 },
953 { 0x0000a218, 0x009c4060 },
954 { 0x0000a220, 0x018830c6 },
955 { 0x0000a224, 0x00000400 },
956 { 0x0000a228, 0x001a0bb5 },
957 { 0x0000a22c, 0x00000000 },
958 { 0x0000a234, 0x20202020 },
959 { 0x0000a238, 0x20202020 },
960 { 0x0000a23c, 0x13c889af },
961 { 0x0000a240, 0x38490a20 },
962 { 0x0000a244, 0x00007bb6 },
963 { 0x0000a248, 0x0fff3ffc },
964 { 0x0000a24c, 0x00000001 },
965 { 0x0000a250, 0x0000e000 },
966 { 0x0000a254, 0x00000000 },
967 { 0x0000a258, 0x0cc75380 },
968 { 0x0000a25c, 0x0f0f0f01 },
969 { 0x0000a260, 0xdfa91f01 },
970 { 0x0000a268, 0x00000001 },
971 { 0x0000a26c, 0x0ebae9c6 },
972 { 0x0000b26c, 0x0ebae9c6 },
973 { 0x0000c26c, 0x0ebae9c6 },
974 { 0x0000d270, 0x00820820 },
975 { 0x0000a278, 0x1ce739ce },
976 { 0x0000a27c, 0x050701ce },
977 { 0x0000a338, 0x00000000 },
978 { 0x0000a33c, 0x00000000 },
979 { 0x0000a340, 0x00000000 },
980 { 0x0000a344, 0x00000000 },
981 { 0x0000a348, 0x3fffffff },
982 { 0x0000a34c, 0x3fffffff },
983 { 0x0000a350, 0x3fffffff },
984 { 0x0000a354, 0x0003ffff },
985 { 0x0000a358, 0x79bfaa03 },
986 { 0x0000d35c, 0x07ffffef },
987 { 0x0000d360, 0x0fffffe7 },
988 { 0x0000d364, 0x17ffffe5 },
989 { 0x0000d368, 0x1fffffe4 },
990 { 0x0000d36c, 0x37ffffe3 },
991 { 0x0000d370, 0x3fffffe3 },
992 { 0x0000d374, 0x57ffffe3 },
993 { 0x0000d378, 0x5fffffe2 },
994 { 0x0000d37c, 0x7fffffe2 },
995 { 0x0000d380, 0x7f3c7bba },
996 { 0x0000d384, 0xf3307ff0 },
997 { 0x0000a388, 0x0c000000 },
998 { 0x0000a38c, 0x20202020 },
999 { 0x0000a390, 0x20202020 },
1000 { 0x0000a394, 0x1ce739ce },
1001 { 0x0000a398, 0x000001ce },
1002 { 0x0000a39c, 0x00000001 },
1003 { 0x0000a3a0, 0x00000000 },
1004 { 0x0000a3a4, 0x00000000 },
1005 { 0x0000a3a8, 0x00000000 },
1006 { 0x0000a3ac, 0x00000000 },
1007 { 0x0000a3b0, 0x00000000 },
1008 { 0x0000a3b4, 0x00000000 },
1009 { 0x0000a3b8, 0x00000000 },
1010 { 0x0000a3bc, 0x00000000 },
1011 { 0x0000a3c0, 0x00000000 },
1012 { 0x0000a3c4, 0x00000000 },
1013 { 0x0000a3c8, 0x00000246 },
1014 { 0x0000a3cc, 0x20202020 },
1015 { 0x0000a3d0, 0x20202020 },
1016 { 0x0000a3d4, 0x20202020 },
1017 { 0x0000a3dc, 0x1ce739ce },
1018 { 0x0000a3e0, 0x000001ce },
1019};
1020
/*
 * ar5416Bank0_9160: { offset, value } pairs for analog RF "bank 0" —
 * presumably loaded through the 0x98xx analog interface registers;
 * inferred from the name, confirm against the RF bank writer.
 */
1021static const u32 ar5416Bank0_9160[][2] = {
1022 { 0x000098b0, 0x1e5795e5 },
1023 { 0x000098e0, 0x02008020 },
1024};
1025
/*
 * ar5416BB_RfGain_9160: baseband RF gain table, rows of
 * { register offset (0x9a00..0x9afc), value-A, value-B }. The two value
 * columns are presumably per-band/per-mode variants selected at init —
 * inferred from the two-column layout; confirm against the table writer.
 */
1026static const u32 ar5416BB_RfGain_9160[][3] = {
1027 { 0x00009a00, 0x00000000, 0x00000000 },
1028 { 0x00009a04, 0x00000040, 0x00000040 },
1029 { 0x00009a08, 0x00000080, 0x00000080 },
1030 { 0x00009a0c, 0x000001a1, 0x00000141 },
1031 { 0x00009a10, 0x000001e1, 0x00000181 },
1032 { 0x00009a14, 0x00000021, 0x000001c1 },
1033 { 0x00009a18, 0x00000061, 0x00000001 },
1034 { 0x00009a1c, 0x00000168, 0x00000041 },
1035 { 0x00009a20, 0x000001a8, 0x000001a8 },
1036 { 0x00009a24, 0x000001e8, 0x000001e8 },
1037 { 0x00009a28, 0x00000028, 0x00000028 },
1038 { 0x00009a2c, 0x00000068, 0x00000068 },
1039 { 0x00009a30, 0x00000189, 0x000000a8 },
1040 { 0x00009a34, 0x000001c9, 0x00000169 },
1041 { 0x00009a38, 0x00000009, 0x000001a9 },
1042 { 0x00009a3c, 0x00000049, 0x000001e9 },
1043 { 0x00009a40, 0x00000089, 0x00000029 },
1044 { 0x00009a44, 0x00000170, 0x00000069 },
1045 { 0x00009a48, 0x000001b0, 0x00000190 },
1046 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1047 { 0x00009a50, 0x00000030, 0x00000010 },
1048 { 0x00009a54, 0x00000070, 0x00000050 },
1049 { 0x00009a58, 0x00000191, 0x00000090 },
1050 { 0x00009a5c, 0x000001d1, 0x00000151 },
1051 { 0x00009a60, 0x00000011, 0x00000191 },
1052 { 0x00009a64, 0x00000051, 0x000001d1 },
1053 { 0x00009a68, 0x00000091, 0x00000011 },
1054 { 0x00009a6c, 0x000001b8, 0x00000051 },
1055 { 0x00009a70, 0x000001f8, 0x00000198 },
1056 { 0x00009a74, 0x00000038, 0x000001d8 },
1057 { 0x00009a78, 0x00000078, 0x00000018 },
1058 { 0x00009a7c, 0x00000199, 0x00000058 },
1059 { 0x00009a80, 0x000001d9, 0x00000098 },
1060 { 0x00009a84, 0x00000019, 0x00000159 },
1061 { 0x00009a88, 0x00000059, 0x00000199 },
1062 { 0x00009a8c, 0x00000099, 0x000001d9 },
1063 { 0x00009a90, 0x000000d9, 0x00000019 },
1064 { 0x00009a94, 0x000000f9, 0x00000059 },
1065 { 0x00009a98, 0x000000f9, 0x00000099 },
1066 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1067 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1068 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1069 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1070 { 0x00009aac, 0x000000f9, 0x000000f9 },
1071 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1072 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1073 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1074 { 0x00009abc, 0x000000f9, 0x000000f9 },
1075 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1076 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1077 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1078 { 0x00009acc, 0x000000f9, 0x000000f9 },
1079 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1080 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1081 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1082 { 0x00009adc, 0x000000f9, 0x000000f9 },
1083 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1084 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1085 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1086 { 0x00009aec, 0x000000f9, 0x000000f9 },
1087 { 0x00009af0, 0x000000f9, 0x000000f9 },
1088 { 0x00009af4, 0x000000f9, 0x000000f9 },
1089 { 0x00009af8, 0x000000f9, 0x000000f9 },
1090 { 0x00009afc, 0x000000f9, 0x000000f9 },
1091};
1092
/* ar5416Bank1_9160: analog RF bank 1 { offset, value } pairs. */
1093static const u32 ar5416Bank1_9160[][2] = {
1094 { 0x000098b0, 0x02108421 },
1095 { 0x000098ec, 0x00000008 },
1096};
1097
/* ar5416Bank2_9160: analog RF bank 2 { offset, value } pairs. */
1098static const u32 ar5416Bank2_9160[][2] = {
1099 { 0x000098b0, 0x0e73ff17 },
1100 { 0x000098e0, 0x00000420 },
1101};
1102
/*
 * ar5416Bank3_9160: analog RF bank 3; two value columns, presumably one
 * per band/mode — confirm against the bank writer's column selection.
 */
1103static const u32 ar5416Bank3_9160[][3] = {
1104 { 0x000098f0, 0x01400018, 0x01c00018 },
1105};
1106
/*
 * ar5416Bank6_9160: analog RF bank 6. The repeated writes to 0x989c
 * look like a serial load of a long shift register, latched by the
 * final write to 0x98d0 — inferred from the repeated-offset pattern;
 * confirm against the RF bank programming code.
 */
1107static const u32 ar5416Bank6_9160[][3] = {
1108 { 0x0000989c, 0x00000000, 0x00000000 },
1109 { 0x0000989c, 0x00000000, 0x00000000 },
1110 { 0x0000989c, 0x00000000, 0x00000000 },
1111 { 0x0000989c, 0x00e00000, 0x00e00000 },
1112 { 0x0000989c, 0x005e0000, 0x005e0000 },
1113 { 0x0000989c, 0x00120000, 0x00120000 },
1114 { 0x0000989c, 0x00620000, 0x00620000 },
1115 { 0x0000989c, 0x00020000, 0x00020000 },
1116 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1117 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1118 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1119 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1120 { 0x0000989c, 0x005f0000, 0x005f0000 },
1121 { 0x0000989c, 0x00870000, 0x00870000 },
1122 { 0x0000989c, 0x00f90000, 0x00f90000 },
1123 { 0x0000989c, 0x007b0000, 0x007b0000 },
1124 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1125 { 0x0000989c, 0x00f50000, 0x00f50000 },
1126 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1127 { 0x0000989c, 0x00110000, 0x00110000 },
1128 { 0x0000989c, 0x006100a8, 0x006100a8 },
1129 { 0x0000989c, 0x004210a2, 0x004210a2 },
1130 { 0x0000989c, 0x0014008f, 0x0014008f },
1131 { 0x0000989c, 0x00c40003, 0x00c40003 },
1132 { 0x0000989c, 0x003000f2, 0x003000f2 },
1133 { 0x0000989c, 0x00440016, 0x00440016 },
1134 { 0x0000989c, 0x00410040, 0x00410040 },
1135 { 0x0000989c, 0x0001805e, 0x0001805e },
1136 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1137 { 0x0000989c, 0x000000f1, 0x000000f1 },
1138 { 0x0000989c, 0x00002081, 0x00002081 },
1139 { 0x0000989c, 0x000000d4, 0x000000d4 },
1140 { 0x000098d0, 0x0000000f, 0x0010000f },
1141};
1142
/*
 * ar5416Bank6TPC_9160: variant of bank 6 — "TPC" presumably enables
 * transmit power control; differs from ar5416Bank6_9160 only in a few
 * entries near the end (rows 1165-1167, 1173-1174). Inferred from the
 * name; confirm against the code that selects between the two tables.
 */
1143static const u32 ar5416Bank6TPC_9160[][3] = {
1144 { 0x0000989c, 0x00000000, 0x00000000 },
1145 { 0x0000989c, 0x00000000, 0x00000000 },
1146 { 0x0000989c, 0x00000000, 0x00000000 },
1147 { 0x0000989c, 0x00e00000, 0x00e00000 },
1148 { 0x0000989c, 0x005e0000, 0x005e0000 },
1149 { 0x0000989c, 0x00120000, 0x00120000 },
1150 { 0x0000989c, 0x00620000, 0x00620000 },
1151 { 0x0000989c, 0x00020000, 0x00020000 },
1152 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1153 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1154 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1155 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1156 { 0x0000989c, 0x005f0000, 0x005f0000 },
1157 { 0x0000989c, 0x00870000, 0x00870000 },
1158 { 0x0000989c, 0x00f90000, 0x00f90000 },
1159 { 0x0000989c, 0x007b0000, 0x007b0000 },
1160 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1161 { 0x0000989c, 0x00f50000, 0x00f50000 },
1162 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1163 { 0x0000989c, 0x00110000, 0x00110000 },
1164 { 0x0000989c, 0x006100a8, 0x006100a8 },
1165 { 0x0000989c, 0x00423022, 0x00423022 },
1166 { 0x0000989c, 0x2014008f, 0x2014008f },
1167 { 0x0000989c, 0x00c40002, 0x00c40002 },
1168 { 0x0000989c, 0x003000f2, 0x003000f2 },
1169 { 0x0000989c, 0x00440016, 0x00440016 },
1170 { 0x0000989c, 0x00410040, 0x00410040 },
1171 { 0x0000989c, 0x0001805e, 0x0001805e },
1172 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1173 { 0x0000989c, 0x000000e1, 0x000000e1 },
1174 { 0x0000989c, 0x00007080, 0x00007080 },
1175 { 0x0000989c, 0x000000d4, 0x000000d4 },
1176 { 0x000098d0, 0x0000000f, 0x0010000f },
1177};
1178
/* ar5416Bank7_9160: analog RF bank 7; serial writes to 0x989c latched at 0x98cc. */
1179static const u32 ar5416Bank7_9160[][2] = {
1180 { 0x0000989c, 0x00000500 },
1181 { 0x0000989c, 0x00000800 },
1182 { 0x000098cc, 0x0000000e },
1183};
1184
/*
 * ar5416Addac_9160: ADDAC (analog-digital/digital-analog converter)
 * programming sequence — serial writes to 0x989c, latched by the final
 * write to 0x98cc. Purpose inferred from the name; confirm against the
 * hw init code that replays this table.
 */
1185static const u32 ar5416Addac_9160[][2] = {
1186 {0x0000989c, 0x00000000 },
1187 {0x0000989c, 0x00000000 },
1188 {0x0000989c, 0x00000000 },
1189 {0x0000989c, 0x00000000 },
1190 {0x0000989c, 0x00000000 },
1191 {0x0000989c, 0x00000000 },
1192 {0x0000989c, 0x000000c0 },
1193 {0x0000989c, 0x00000018 },
1194 {0x0000989c, 0x00000004 },
1195 {0x0000989c, 0x00000000 },
1196 {0x0000989c, 0x00000000 },
1197 {0x0000989c, 0x00000000 },
1198 {0x0000989c, 0x00000000 },
1199 {0x0000989c, 0x00000000 },
1200 {0x0000989c, 0x00000000 },
1201 {0x0000989c, 0x00000000 },
1202 {0x0000989c, 0x00000000 },
1203 {0x0000989c, 0x00000000 },
1204 {0x0000989c, 0x00000000 },
1205 {0x0000989c, 0x00000000 },
1206 {0x0000989c, 0x00000000 },
1207 {0x0000989c, 0x000000c0 },
1208 {0x0000989c, 0x00000019 },
1209 {0x0000989c, 0x00000004 },
1210 {0x0000989c, 0x00000000 },
1211 {0x0000989c, 0x00000000 },
1212 {0x0000989c, 0x00000000 },
1213 {0x0000989c, 0x00000004 },
1214 {0x0000989c, 0x00000003 },
1215 {0x0000989c, 0x00000008 },
1216 {0x0000989c, 0x00000000 },
1217 {0x000098cc, 0x00000000 },
1218};
1219
/*
 * ar5416Addac_91601_1: ADDAC sequence variant — the "1_1" suffix likely
 * denotes AR9160 silicon revision 1.1 (TODO confirm against the revision
 * check that picks this table). Differs from ar5416Addac_9160 only in
 * the tail entries (rows 1248-1250 are zero here).
 */
1220static const u32 ar5416Addac_91601_1[][2] = {
1221 {0x0000989c, 0x00000000 },
1222 {0x0000989c, 0x00000000 },
1223 {0x0000989c, 0x00000000 },
1224 {0x0000989c, 0x00000000 },
1225 {0x0000989c, 0x00000000 },
1226 {0x0000989c, 0x00000000 },
1227 {0x0000989c, 0x000000c0 },
1228 {0x0000989c, 0x00000018 },
1229 {0x0000989c, 0x00000004 },
1230 {0x0000989c, 0x00000000 },
1231 {0x0000989c, 0x00000000 },
1232 {0x0000989c, 0x00000000 },
1233 {0x0000989c, 0x00000000 },
1234 {0x0000989c, 0x00000000 },
1235 {0x0000989c, 0x00000000 },
1236 {0x0000989c, 0x00000000 },
1237 {0x0000989c, 0x00000000 },
1238 {0x0000989c, 0x00000000 },
1239 {0x0000989c, 0x00000000 },
1240 {0x0000989c, 0x00000000 },
1241 {0x0000989c, 0x00000000 },
1242 {0x0000989c, 0x000000c0 },
1243 {0x0000989c, 0x00000019 },
1244 {0x0000989c, 0x00000004 },
1245 {0x0000989c, 0x00000000 },
1246 {0x0000989c, 0x00000000 },
1247 {0x0000989c, 0x00000000 },
1248 {0x0000989c, 0x00000000 },
1249 {0x0000989c, 0x00000000 },
1250 {0x0000989c, 0x00000000 },
1251 {0x0000989c, 0x00000000 },
1252 {0x000098cc, 0x00000000 },
1253};
1254
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 000000000000..5fdbb53b47e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "ar9002_phy.h"
20
/* NOTE(review): presumably how many times the AR9285 carrier-leak (CL)
 * calibration may be retried — confirm against its user later in this file. */
21#define AR9285_CLCAL_REDO_THRESH 1
22
/*
 * Arm the PHY for one calibration measurement:
 *  - load calCountMax into the IQCAL_LOG_COUNT_MAX field of
 *    AR_PHY_TIMING_CTRL4 (sets how long the hardware accumulates),
 *  - select the calibration mode in AR_PHY_CALMODE from
 *    currCal->calData->calType (TEMP_COMP_CAL has no mode register
 *    write here and is deliberately a no-op),
 *  - set the DO_CAL bit to start the measurement; the hardware clears
 *    it when done (polled by ar9002_hw_per_calibration()).
 */
23static void ar9002_hw_setup_calibration(struct ath_hw *ah,
24 struct ath9k_cal_list *currCal)
25{
26 struct ath_common *common = ath9k_hw_common(ah);
27
28 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
29 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
30 currCal->calData->calCountMax);
31
32 switch (currCal->calData->calType) {
33 case IQ_MISMATCH_CAL:
34 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
35 ath_print(common, ATH_DBG_CALIBRATE,
36 "starting IQ Mismatch Calibration\n");
37 break;
38 case ADC_GAIN_CAL:
39 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
40 ath_print(common, ATH_DBG_CALIBRATE,
41 "starting ADC Gain Calibration\n");
42 break;
43 case ADC_DC_CAL:
44 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
45 ath_print(common, ATH_DBG_CALIBRATE,
46 "starting ADC DC Calibration\n");
47 break;
48 case ADC_DC_INIT_CAL:
49 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
50 ath_print(common, ATH_DBG_CALIBRATE,
51 "starting Init ADC DC Calibration\n");
52 break;
53 case TEMP_COMP_CAL:
54 break; /* Not supported */
55 }
56
57 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
58 AR_PHY_TIMING_CTRL4_DO_CAL);
59}
60
/*
 * Advance one periodic calibration by a single step and report whether
 * it has completed.
 *
 * If the calibration is CAL_RUNNING and the hardware has cleared
 * DO_CAL (the bit set by ar9002_hw_setup_calibration() to start a
 * measurement), collect the sample via the calData hook. Once
 * calNumSamples samples are in, count the active chains in rxchainmask,
 * post-process, record the result in ichan->CalValid, and mark the
 * calibration CAL_DONE; otherwise re-arm the hardware for the next
 * sample. If the calibration is not running and this channel has no
 * valid result yet, (re)start it via ath9k_hw_reset_calibration().
 *
 * Returns true only on the call that completes the calibration.
 */
61static bool ar9002_hw_per_calibration(struct ath_hw *ah,
62 struct ath9k_channel *ichan,
63 u8 rxchainmask,
64 struct ath9k_cal_list *currCal)
65{
66 bool iscaldone = false;
67
68 if (currCal->calState == CAL_RUNNING) {
69 if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
70 AR_PHY_TIMING_CTRL4_DO_CAL)) {
71
72 currCal->calData->calCollect(ah);
73 ah->cal_samples++;
74
75 if (ah->cal_samples >=
76 currCal->calData->calNumSamples) {
77 int i, numChains = 0;
78 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
79 if (rxchainmask & (1 << i))
80 numChains++;
81 }
82
83 currCal->calData->calPostProc(ah, numChains);
84 ichan->CalValid |= currCal->calData->calType;
85 currCal->calState = CAL_DONE;
86 iscaldone = true;
87 } else {
88 ar9002_hw_setup_calibration(ah, currCal);
89 }
90 }
91 } else if (!(ichan->CalValid & currCal->calData->calType)) {
92 ath9k_hw_reset_calibration(ah, currCal);
93 }
94
95 return iscaldone;
96}
97
98/* Assumes you are talking about the currently configured channel */
99static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
100 enum ath9k_cal_types calType)
101{
102 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
103
104 switch (calType & ah->supp_cals) {
105 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
106 return true;
107 case ADC_GAIN_CAL:
108 case ADC_DC_CAL:
109 if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
110 conf_is_ht20(conf)))
111 return true;
112 break;
113 }
114 return false;
115}
116
/*
 * IQ-mismatch sample collector (calCollect hook): for each chain,
 * accumulate the hardware's I power, Q power and I/Q correlation
 * measurement registers into the running totals in ah. Called once per
 * completed measurement by ar9002_hw_per_calibration(). The correlation
 * register is accumulated as a signed quantity.
 */
117static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
118{
119 int i;
120
121 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
122 ah->totalPowerMeasI[i] +=
123 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
124 ah->totalPowerMeasQ[i] +=
125 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
126 ah->totalIqCorrMeas[i] +=
127 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
128 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
129 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
130 ah->cal_samples, i, ah->totalPowerMeasI[i],
131 ah->totalPowerMeasQ[i],
132 ah->totalIqCorrMeas[i]);
133 }
134}
135
/*
 * ADC gain-calibration sample collector (calCollect hook): accumulate
 * the odd/even-phase I and Q measurement registers per chain into the
 * running totals in ah; consumed by ar9002_hw_adc_gaincal_calibrate().
 */
136static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
137{
138 int i;
139
140 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
141 ah->totalAdcIOddPhase[i] +=
142 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
143 ah->totalAdcIEvenPhase[i] +=
144 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
145 ah->totalAdcQOddPhase[i] +=
146 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
147 ah->totalAdcQEvenPhase[i] +=
148 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
149
150 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
151 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
152 "oddq=0x%08x; evenq=0x%08x;\n",
153 ah->cal_samples, i,
154 ah->totalAdcIOddPhase[i],
155 ah->totalAdcIEvenPhase[i],
156 ah->totalAdcQOddPhase[i],
157 ah->totalAdcQEvenPhase[i]);
158 }
159}
160
/*
 * ADC DC-offset sample collector (calCollect hook): like the gain
 * collector, but the measurement registers are accumulated as signed
 * values into the DC-offset totals in ah; consumed by the DC-offset
 * post-processing step.
 */
161static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
162{
163 int i;
164
165 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
166 ah->totalAdcDcOffsetIOddPhase[i] +=
167 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
168 ah->totalAdcDcOffsetIEvenPhase[i] +=
169 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
170 ah->totalAdcDcOffsetQOddPhase[i] +=
171 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
172 ah->totalAdcDcOffsetQEvenPhase[i] +=
173 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
174
175 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
176 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
177 "oddq=0x%08x; evenq=0x%08x;\n",
178 ah->cal_samples, i,
179 ah->totalAdcDcOffsetIOddPhase[i],
180 ah->totalAdcDcOffsetIEvenPhase[i],
181 ah->totalAdcDcOffsetQOddPhase[i],
182 ah->totalAdcDcOffsetQEvenPhase[i]);
183 }
184}
185
186static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
187{
188 struct ath_common *common = ath9k_hw_common(ah);
189 u32 powerMeasQ, powerMeasI, iqCorrMeas;
190 u32 qCoffDenom, iCoffDenom;
191 int32_t qCoff, iCoff;
192 int iqCorrNeg, i;
193
194 for (i = 0; i < numChains; i++) {
195 powerMeasI = ah->totalPowerMeasI[i];
196 powerMeasQ = ah->totalPowerMeasQ[i];
197 iqCorrMeas = ah->totalIqCorrMeas[i];
198
199 ath_print(common, ATH_DBG_CALIBRATE,
200 "Starting IQ Cal and Correction for Chain %d\n",
201 i);
202
203 ath_print(common, ATH_DBG_CALIBRATE,
204 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
205 i, ah->totalIqCorrMeas[i]);
206
207 iqCorrNeg = 0;
208
209 if (iqCorrMeas > 0x80000000) {
210 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
211 iqCorrNeg = 1;
212 }
213
214 ath_print(common, ATH_DBG_CALIBRATE,
215 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
216 ath_print(common, ATH_DBG_CALIBRATE,
217 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
218 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
219 iqCorrNeg);
220
221 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
222 qCoffDenom = powerMeasQ / 64;
223
224 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
225 (qCoffDenom != 0)) {
226 iCoff = iqCorrMeas / iCoffDenom;
227 qCoff = powerMeasI / qCoffDenom - 64;
228 ath_print(common, ATH_DBG_CALIBRATE,
229 "Chn %d iCoff = 0x%08x\n", i, iCoff);
230 ath_print(common, ATH_DBG_CALIBRATE,
231 "Chn %d qCoff = 0x%08x\n", i, qCoff);
232
233 iCoff = iCoff & 0x3f;
234 ath_print(common, ATH_DBG_CALIBRATE,
235 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
236 if (iqCorrNeg == 0x0)
237 iCoff = 0x40 - iCoff;
238
239 if (qCoff > 15)
240 qCoff = 15;
241 else if (qCoff <= -16)
242 qCoff = 16;
243
244 ath_print(common, ATH_DBG_CALIBRATE,
245 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
246 i, iCoff, qCoff);
247
248 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
249 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
250 iCoff);
251 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
252 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
253 qCoff);
254 ath_print(common, ATH_DBG_CALIBRATE,
255 "IQ Cal and Correction done for Chain %d\n",
256 i);
257 }
258 }
259
260 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
261 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
262}
263
/*
 * ADC gain post-processing (calPostProc hook): for each chain, compute
 * the even/odd phase gain-mismatch ratios (scaled by 32 and masked to
 * 6 bits) from the totals accumulated by
 * ar9002_hw_adc_gaincal_collect(), write them into the low 12 bits of
 * AR_PHY_NEW_ADC_DC_GAIN_CORR (read-modify-write preserving the upper
 * bits), then set the gain-correction enable bit on chain 0's register.
 * Chains whose denominators are zero are skipped to avoid dividing by
 * zero.
 */
264static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
265{
266 struct ath_common *common = ath9k_hw_common(ah);
267 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
268 u32 qGainMismatch, iGainMismatch, val, i;
269
270 for (i = 0; i < numChains; i++) {
271 iOddMeasOffset = ah->totalAdcIOddPhase[i];
272 iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
273 qOddMeasOffset = ah->totalAdcQOddPhase[i];
274 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
275
276 ath_print(common, ATH_DBG_CALIBRATE,
277 "Starting ADC Gain Cal for Chain %d\n", i);
278
279 ath_print(common, ATH_DBG_CALIBRATE,
280 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
281 iOddMeasOffset);
282 ath_print(common, ATH_DBG_CALIBRATE,
283 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
284 iEvenMeasOffset);
285 ath_print(common, ATH_DBG_CALIBRATE,
286 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
287 qOddMeasOffset);
288 ath_print(common, ATH_DBG_CALIBRATE,
289 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
290 qEvenMeasOffset);
291
292 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
293 iGainMismatch =
294 ((iEvenMeasOffset * 32) /
295 iOddMeasOffset) & 0x3f;
296 qGainMismatch =
297 ((qOddMeasOffset * 32) /
298 qEvenMeasOffset) & 0x3f;
299
300 ath_print(common, ATH_DBG_CALIBRATE,
301 "Chn %d gain_mismatch_i = 0x%08x\n", i,
302 iGainMismatch);
303 ath_print(common, ATH_DBG_CALIBRATE,
304 "Chn %d gain_mismatch_q = 0x%08x\n", i,
305 qGainMismatch);
306
307 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
308 val &= 0xfffff000;
309 val |= (qGainMismatch) | (iGainMismatch << 6);
310 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
311
312 ath_print(common, ATH_DBG_CALIBRATE,
313 "ADC Gain Cal done for Chain %d\n", i);
314 }
315 }
316
317 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
318 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
319 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
320}
321
/*
 * Compute and program per-chain ADC DC offset correction.
 *
 * The odd/even-phase DC offset accumulators are normalized by the
 * number of samples taken (derived from the current cal's calCountMax
 * and calNumSamples) into 9-bit mismatch values, which are written into
 * bits [20:12] (q) and [29:21] (i) of AR_PHY_NEW_ADC_DC_GAIN_CORR.
 * Finally the DC offset correction is enabled on the chain-0 register.
 */
static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 iOddMeasOffset, iEvenMeasOffset, val, i;
	int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
	const struct ath9k_percal_data *calData =
		ah->cal_list_curr->calData;
	/* Total samples = 2^(calCountMax + 5) * calNumSamples */
	u32 numSamples =
		(1 << (calData->calCountMax + 5)) * calData->calNumSamples;

	for (i = 0; i < numChains; i++) {
		iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
		iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
		qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
		qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Starting ADC DC Offset Cal for Chain %d\n", i);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_odd_i = %d\n", i,
			  iOddMeasOffset);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_even_i = %d\n", i,
			  iEvenMeasOffset);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_odd_q = %d\n", i,
			  qOddMeasOffset);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_even_q = %d\n", i,
			  qEvenMeasOffset);

		/* Average phase difference, kept to a 9-bit field */
		iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
			       numSamples) & 0x1ff;
		qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
			       numSamples) & 0x1ff;

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
			  iDcMismatch);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
			  qDcMismatch);

		/* Keep bits [31:30] and [11:0]; replace the DC offset fields */
		val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
		val &= 0xc0000fff;
		val |= (qDcMismatch << 12) | (iDcMismatch << 21);
		REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "ADC DC Offset Cal done for Chain %d\n", i);
	}

	/* Turn the DC offset correction on (chain-0 register) */
	REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
		  REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
		  AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
}
379
380static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
381{
382 u32 rddata;
383 int32_t delta, currPDADC, slope;
384
385 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
386 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
387
388 if (ah->initPDADC == 0 || currPDADC == 0) {
389 /*
390 * Zero value indicates that no frames have been transmitted
391 * yet, can't do temperature compensation until frames are
392 * transmitted.
393 */
394 return;
395 } else {
396 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
397
398 if (slope == 0) { /* to avoid divide by zero case */
399 delta = 0;
400 } else {
401 delta = ((currPDADC - ah->initPDADC)*4) / slope;
402 }
403 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
404 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
405 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
406 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
407 }
408}
409
410static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
411{
412 u32 rddata, i;
413 int delta, currPDADC, regval;
414
415 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
416 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
417
418 if (ah->initPDADC == 0 || currPDADC == 0)
419 return;
420
421 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
422 delta = (currPDADC - ah->initPDADC + 4) / 8;
423 else
424 delta = (currPDADC - ah->initPDADC + 5) / 10;
425
426 if (delta != ah->PDADCdelta) {
427 ah->PDADCdelta = delta;
428 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
429 regval = ah->originalGain[i] - delta;
430 if (regval < 0)
431 regval = 0;
432
433 REG_RMW_FIELD(ah,
434 AR_PHY_TX_GAIN_TBL1 + i * 4,
435 AR_PHY_TX_GAIN, regval);
436 }
437 }
438}
439
/*
 * PA (power amplifier) offset calibration for AR9271.
 *
 * Saves a set of analog registers, puts the analog section into the
 * cal configuration, performs a bit-by-bit successive approximation of
 * the offset value in register 0x7834, records the result for the
 * skip-count logic (see ar9002_hw_pa_cal()), and finally restores the
 * saved registers.
 *
 * NOTE: the exact register sequence below follows vendor calibration
 * code; do not reorder the writes.
 */
static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{
	u32 regVal;
	unsigned int i;
	/* Registers saved before and restored after the cal sequence */
	u32 regList[][2] = {
		{ 0x786c, 0 },
		{ 0x7854, 0 },
		{ 0x7820, 0 },
		{ 0x7824, 0 },
		{ 0x7868, 0 },
		{ 0x783c, 0 },
		{ 0x7838, 0 } ,
		{ 0x7828, 0 } ,
	};

	for (i = 0; i < ARRAY_SIZE(regList); i++)
		regList[i][1] = REG_READ(ah, regList[i][0]);

	regVal = REG_READ(ah, 0x7834);
	regVal &= (~(0x1));
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal |= (0x1 << 27);
	REG_WRITE(ah, 0x9808, regVal);

	/* 786c,b23,1, pwddac=1 */
	REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
	/* 7854, b5,1, pdrxtxbb=1 */
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
	/* 7854, b7,1, pdv2i=1 */
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
	/* 7854, b8,1, pddacinterface=1 */
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
	/* 7824,b12,0, offcal=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
	/* 7838, b1,0, pwddb=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
	/* 7820,b11,0, enpacal=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
	/* 7820,b25,1, pdpadrv1=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
	/* 7820,b24,0, pdpadrv2=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
	/* 7820,b23,0, pdpaout=0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
	/* 783c,b14-16,7, padrvgn2tab_0=7 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
	/*
	 * 7838,b29-31,0, padrvgn1tab_0=0
	 * does not matter since we turn it off
	 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);

	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);

	/* Set:
	 * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
	 * txon=1,paon=1,oscon=1,synthon_force=1
	 */
	REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
	udelay(30);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);

	/* find off_6_1; */
	for (i = 6; i > 0; i--) {
		regVal = REG_READ(ah, 0x7834);
		/* Trial-set bit (20 + i) and let the hardware settle */
		regVal |= (1 << (20 + i));
		REG_WRITE(ah, 0x7834, regVal);
		udelay(1);
		/* regVal = REG_READ(ah, 0x7834); */
		/*
		 * NOTE(review): the re-read above is deliberately commented
		 * out; the bit decision is taken from the comparator output
		 * in 0x7840 (SPARE9) instead - confirm against vendor code
		 * before "fixing" this.
		 */
		regVal &= (~(0x1 << (20 + i)));
		regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
			    << (20 + i));
		REG_WRITE(ah, 0x7834, regVal);
	}

	/* Extract the 7-bit offset result from bits [26:20] */
	regVal = (regVal >> 20) & 0x7f;

	/* Update PA cal info */
	if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
		/* Stable result: back off exponentially, up to the cap */
		if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
			ah->pacal_info.max_skipcount =
				2 * ah->pacal_info.max_skipcount;
		ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
	} else {
		/* Result changed (or reset): calibrate again soon */
		ah->pacal_info.max_skipcount = 1;
		ah->pacal_info.skipcount = 0;
		ah->pacal_info.prev_offset = regVal;
	}

	ENABLE_REGWRITE_BUFFER(ah);

	/* Take the analog section back out of cal mode */
	regVal = REG_READ(ah, 0x7834);
	regVal |= 0x1;
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal &= (~(0x1 << 27));
	REG_WRITE(ah, 0x9808, regVal);

	/* Restore the registers saved at entry */
	for (i = 0; i < ARRAY_SIZE(regList); i++)
		REG_WRITE(ah, regList[i][0], regList[i][1]);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}
545
/*
 * PA (power amplifier) offset calibration for AR9285.
 *
 * Mirrors ar9271_hw_pa_cal(): save analog registers, enter the cal
 * configuration, run a successive-approximation search for the offset
 * (bits 20..25 of 0x7834 plus a final LSB in RF2G3/PDVCCOMP), record
 * the result for the skip-count logic, then restore state.  Skipped
 * entirely for high-power tx-gain EEPROMs.
 *
 * NOTE: the register sequence follows vendor calibration code; do not
 * reorder the writes.
 */
static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 regVal;
	int i, offset, offs_6_1, offs_0;
	u32 ccomp_org, reg_field;
	/* Registers saved before and restored after the cal sequence */
	u32 regList[][2] = {
		{ 0x786c, 0 },
		{ 0x7854, 0 },
		{ 0x7820, 0 },
		{ 0x7824, 0 },
		{ 0x7868, 0 },
		{ 0x783c, 0 },
		{ 0x7838, 0 },
	};

	ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");

	/* PA CAL is not needed for high power solution */
	if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
	    AR5416_EEP_TXGAIN_HIGH_POWER)
		return;

	if (AR_SREV_9285_11(ah)) {
		REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
		udelay(10);
	}

	for (i = 0; i < ARRAY_SIZE(regList); i++)
		regList[i][1] = REG_READ(ah, regList[i][0]);

	/* Enter cal mode: clear 0x7834 bit 0, set 0x9808 bit 27 */
	regVal = REG_READ(ah, 0x7834);
	regVal &= (~(0x1));
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal |= (0x1 << 27);
	REG_WRITE(ah, 0x9808, regVal);

	/* Power down the tx path and disable the PA for calibration */
	REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
	/* Save CCOMP so it can be restored after the cal */
	ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);

	REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
	udelay(30);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);

	/* Successive approximation over bits 20..25 of 0x7834 */
	for (i = 6; i > 0; i--) {
		regVal = REG_READ(ah, 0x7834);
		regVal |= (1 << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
		udelay(1);
		regVal = REG_READ(ah, 0x7834);
		regVal &= (~(0x1 << (19 + i)));
		/* Bit decision comes from the comparator output (SPARE9) */
		reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
		regVal |= (reg_field << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
	}

	/* Resolve the final LSB via PDVCCOMP */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
	udelay(1);
	reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
	offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
	offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);

	/* Combine into a 7-bit offset, then split back into fields */
	offset = (offs_6_1<<1) | offs_0;
	offset = offset - 0;
	offs_6_1 = offset>>1;
	offs_0 = offset & 1;

	/* Update the skip-count state (see ar9002_hw_pa_cal()) */
	if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
		if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
			ah->pacal_info.max_skipcount =
				2 * ah->pacal_info.max_skipcount;
		ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
	} else {
		ah->pacal_info.max_skipcount = 1;
		ah->pacal_info.skipcount = 0;
		ah->pacal_info.prev_offset = offset;
	}

	/* Program the final offset */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);

	/* Leave cal mode and restore the saved registers */
	regVal = REG_READ(ah, 0x7834);
	regVal |= 0x1;
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal &= (~(0x1 << 27));
	REG_WRITE(ah, 0x9808, regVal);

	for (i = 0; i < ARRAY_SIZE(regList); i++)
		REG_WRITE(ah, regList[i][0], regList[i][1]);

	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);

	if (AR_SREV_9285_11(ah))
		REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);

}
658
659static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
660{
661 if (AR_SREV_9271(ah)) {
662 if (is_reset || !ah->pacal_info.skipcount)
663 ar9271_hw_pa_cal(ah, is_reset);
664 else
665 ah->pacal_info.skipcount--;
666 } else if (AR_SREV_9285_11_OR_LATER(ah)) {
667 if (is_reset || !ah->pacal_info.skipcount)
668 ar9285_hw_pa_cal(ah, is_reset);
669 else
670 ah->pacal_info.skipcount--;
671 }
672}
673
674static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
675{
676 if (OLC_FOR_AR9287_10_LATER)
677 ar9287_hw_olc_temp_compensation(ah);
678 else if (OLC_FOR_AR9280_20_LATER)
679 ar9280_hw_olc_temp_compensation(ah);
680}
681
/*
 * Periodic calibration entry point for the AR9002 family.
 *
 * Advances the currently scheduled per-chain cal (IQ / ADC gain /
 * ADC DC) by one step; when it completes, the next cal in the list is
 * started if it is waiting.  On longcal intervals, also runs PA cal,
 * OLC temperature compensation and the noise-floor update sequence.
 *
 * Returns true when no periodic cal is still in progress.
 */
static bool ar9002_hw_calibrate(struct ath_hw *ah,
				struct ath9k_channel *chan,
				u8 rxchainmask,
				bool longcal)
{
	bool iscaldone = true;
	struct ath9k_cal_list *currCal = ah->cal_list_curr;

	if (currCal &&
	    (currCal->calState == CAL_RUNNING ||
	     currCal->calState == CAL_WAITING)) {
		iscaldone = ar9002_hw_per_calibration(ah, chan,
						      rxchainmask, currCal);
		if (iscaldone) {
			/* Advance to the next cal in the circular list */
			ah->cal_list_curr = currCal = currCal->calNext;

			if (currCal->calState == CAL_WAITING) {
				iscaldone = false;
				ath9k_hw_reset_calibration(ah, currCal);
			}
		}
	}

	/* Do NF cal only at longer intervals */
	if (longcal) {
		/* Do periodic PAOffset Cal */
		ar9002_hw_pa_cal(ah, false);
		ar9002_hw_olc_temp_compensation(ah);

		/*
		 * Get the value from the previous NF cal and update
		 * history buffer.
		 */
		ath9k_hw_getnf(ah, chan);

		/*
		 * Load the NF from history buffer of the current channel.
		 * NF is slow time-variant, so it is OK to use a historical
		 * value.
		 */
		ath9k_hw_loadnf(ah, ah->curchan);

		/* Kick off the next NF measurement */
		ath9k_hw_start_nfcal(ah);
	}

	return iscaldone;
}
729
/* Carrier leakage Calibration fix */
/*
 * Run the AGC/carrier-leakage calibration sequence for AR9285.
 *
 * For HT20 channels an extra parallel-cal pass is done first with
 * filter cal and PD cal disabled.  Then the common offset calibration
 * runs with the ADC powered down.  Returns false if either AGC cal
 * fails to complete within AH_WAIT_TIMEOUT.
 */
static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);

	REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
	if (IS_CHAN_HT20(chan)) {
		/* First pass: parallel cal with filter/PD cal disabled */
		REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
		REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_FLTR_CAL);
		REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
				   AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
			ath_print(common, ATH_DBG_CALIBRATE, "offset "
				  "calibration failed to complete in "
				  "1ms; noisy ??\n");
			return false;
		}
		REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
		REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
		REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
	}
	/* Second pass: normal offset cal with ADC powered down */
	REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
	REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
			   0, AH_WAIT_TIMEOUT)) {
		ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
			  "failed to complete in 1ms; noisy ??\n");
		return false;
	}

	/* Restore ADC power and disable CL cal */
	REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
	REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);

	return true;
}
771
772static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
773{
774 int i;
775 u_int32_t txgain_max;
776 u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
777 u_int32_t reg_clc_I0, reg_clc_Q0;
778 u_int32_t i0_num = 0;
779 u_int32_t q0_num = 0;
780 u_int32_t total_num = 0;
781 u_int32_t reg_rf2g5_org;
782 bool retv = true;
783
784 if (!(ar9285_hw_cl_cal(ah, chan)))
785 return false;
786
787 txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
788 AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
789
790 for (i = 0; i < (txgain_max+1); i++) {
791 clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
792 AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
793 if (!(gain_mask & (1 << clc_gain))) {
794 gain_mask |= (1 << clc_gain);
795 clc_num++;
796 }
797 }
798
799 for (i = 0; i < clc_num; i++) {
800 reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
801 & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
802 reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
803 & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
804 if (reg_clc_I0 == 0)
805 i0_num++;
806
807 if (reg_clc_Q0 == 0)
808 q0_num++;
809 }
810 total_num = i0_num + q0_num;
811 if (total_num > AR9285_CLCAL_REDO_THRESH) {
812 reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
813 if (AR_SREV_9285E_20(ah)) {
814 REG_WRITE(ah, AR9285_RF2G5,
815 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
816 AR9285_RF2G5_IC50TX_XE_SET);
817 } else {
818 REG_WRITE(ah, AR9285_RF2G5,
819 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
820 AR9285_RF2G5_IC50TX_SET);
821 }
822 retv = ar9285_hw_cl_cal(ah, chan);
823 REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
824 }
825 return retv;
826}
827
/*
 * Initial (post-reset) calibration for the AR9002 family.
 *
 * AR9271/AR9285 1.2+ use the carrier-leakage cal path; everything else
 * runs the plain AGC offset calibration.  Afterwards PA cal and NF cal
 * are kicked off, and on chips that support them the periodic IQ /
 * ADC gain / ADC DC cals are inserted into the cal list and started.
 *
 * Returns false if the AGC offset calibration timed out.
 */
static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
		if (!ar9285_hw_clc(ah, chan))
			return false;
	} else {
		if (AR_SREV_9280_10_OR_LATER(ah)) {
			/* Power down the ADC and enable filter cal (not 9287+) */
			if (!AR_SREV_9287_10_OR_LATER(ah))
				REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
					    AR_PHY_ADC_CTL_OFF_PWDADC);
			REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
				    AR_PHY_AGC_CONTROL_FLTR_CAL);
		}

		/* Calibrate the AGC */
		REG_WRITE(ah, AR_PHY_AGC_CONTROL,
			  REG_READ(ah, AR_PHY_AGC_CONTROL) |
			  AR_PHY_AGC_CONTROL_CAL);

		/* Poll for offset calibration complete */
		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
				   AR_PHY_AGC_CONTROL_CAL,
				   0, AH_WAIT_TIMEOUT)) {
			ath_print(common, ATH_DBG_CALIBRATE,
				  "offset calibration failed to "
				  "complete in 1ms; noisy environment?\n");
			return false;
		}

		if (AR_SREV_9280_10_OR_LATER(ah)) {
			/* Undo the pre-cal ADC/filter-cal settings */
			if (!AR_SREV_9287_10_OR_LATER(ah))
				REG_SET_BIT(ah, AR_PHY_ADC_CTL,
					    AR_PHY_ADC_CTL_OFF_PWDADC);
			REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
				    AR_PHY_AGC_CONTROL_FLTR_CAL);
		}
	}

	/* Do PA Calibration */
	ar9002_hw_pa_cal(ah, true);

	/* Do NF Calibration after DC offset and other calibrations */
	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
		  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);

	/* Start with an empty periodic-cal list */
	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

	/* Enable IQ, ADC Gain and ADC DC offset CALs */
	if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
		if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
			INIT_CAL(&ah->adcgain_caldata);
			INSERT_CAL(ah, &ah->adcgain_caldata);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "enabling ADC Gain Calibration.\n");
		}
		if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
			INIT_CAL(&ah->adcdc_caldata);
			INSERT_CAL(ah, &ah->adcdc_caldata);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "enabling ADC DC Calibration.\n");
		}
		if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
			INIT_CAL(&ah->iq_caldata);
			INSERT_CAL(ah, &ah->iq_caldata);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "enabling IQ Calibration.\n");
		}

		/* Start the first cal in the list, if any were enabled */
		ah->cal_list_curr = ah->cal_list;

		if (ah->cal_list_curr)
			ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
	}

	/* Force the periodic cals to run on the next pass */
	chan->CalValid = 0;

	return true;
}
908
/*
 * Per-calibration parameter tables: cal type, sample count, log-count
 * scaling, and the collect/calibrate callbacks.  The "multi_sample"
 * variants are selected for pre-AR9280 chips and the "single_sample"
 * variants for AR9280 and later (see ar9002_hw_init_cal_settings()).
 */
static const struct ath9k_percal_data iq_cal_multi_sample = {
	IQ_MISMATCH_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ar9002_hw_iqcal_collect,
	ar9002_hw_iqcalibrate
};
static const struct ath9k_percal_data iq_cal_single_sample = {
	IQ_MISMATCH_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ar9002_hw_iqcal_collect,
	ar9002_hw_iqcalibrate
};
static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
	ADC_GAIN_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ar9002_hw_adc_gaincal_collect,
	ar9002_hw_adc_gaincal_calibrate
};
static const struct ath9k_percal_data adc_gain_cal_single_sample = {
	ADC_GAIN_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ar9002_hw_adc_gaincal_collect,
	ar9002_hw_adc_gaincal_calibrate
};
static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
	ADC_DC_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ar9002_hw_adc_dccal_collect,
	ar9002_hw_adc_dccal_calibrate
};
static const struct ath9k_percal_data adc_dc_cal_single_sample = {
	ADC_DC_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ar9002_hw_adc_dccal_collect,
	ar9002_hw_adc_dccal_calibrate
};
/* One-shot DC cal run at init time */
static const struct ath9k_percal_data adc_init_dc_cal = {
	ADC_DC_INIT_CAL,
	MIN_CAL_SAMPLES,
	INIT_LOG_COUNT,
	ar9002_hw_adc_dccal_collect,
	ar9002_hw_adc_dccal_calibrate
};
958
959static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
960{
961 if (AR_SREV_9100(ah)) {
962 ah->iq_caldata.calData = &iq_cal_multi_sample;
963 ah->supp_cals = IQ_MISMATCH_CAL;
964 return;
965 }
966
967 if (AR_SREV_9160_10_OR_LATER(ah)) {
968 if (AR_SREV_9280_10_OR_LATER(ah)) {
969 ah->iq_caldata.calData = &iq_cal_single_sample;
970 ah->adcgain_caldata.calData =
971 &adc_gain_cal_single_sample;
972 ah->adcdc_caldata.calData =
973 &adc_dc_cal_single_sample;
974 ah->adcdc_calinitdata.calData =
975 &adc_init_dc_cal;
976 } else {
977 ah->iq_caldata.calData = &iq_cal_multi_sample;
978 ah->adcgain_caldata.calData =
979 &adc_gain_cal_multi_sample;
980 ah->adcdc_caldata.calData =
981 &adc_dc_cal_multi_sample;
982 ah->adcdc_calinitdata.calData =
983 &adc_init_dc_cal;
984 }
985 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
986 }
987}
988
989void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
990{
991 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
992 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
993
994 priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
995 priv_ops->init_cal = ar9002_hw_init_cal;
996 priv_ops->setup_calibration = ar9002_hw_setup_calibration;
997 priv_ops->iscal_supported = ar9002_hw_iscal_supported;
998
999 ops->calibrate = ar9002_hw_calibrate;
1000}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 000000000000..a8a8cdc04afa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar5008_initvals.h"
19#include "ar9001_initvals.h"
20#include "ar9002_initvals.h"
21
/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
23
24static bool ar9002_hw_macversion_supported(u32 macversion)
25{
26 switch (macversion) {
27 case AR_SREV_VERSION_5416_PCI:
28 case AR_SREV_VERSION_5416_PCIE:
29 case AR_SREV_VERSION_9160:
30 case AR_SREV_VERSION_9100:
31 case AR_SREV_VERSION_9280:
32 case AR_SREV_VERSION_9285:
33 case AR_SREV_VERSION_9287:
34 case AR_SREV_VERSION_9271:
35 return true;
36 default:
37 break;
38 }
39 return false;
40}
41
/*
 * Select the register-initialization (initvals) tables for the detected
 * chip.  Branches are ordered newest-to-oldest; each chip generation
 * gets its mode/common tables and, where applicable, PCIe serdes
 * tables (clkreq variant chosen by ah->config.pcie_clock_req) and
 * RF bank tables.
 */
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
	/* AR9271 (USB) */
	if (AR_SREV_9271(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
			       ARRAY_SIZE(ar9271Modes_9271), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
			       ARRAY_SIZE(ar9271Common_9271), 2);
		INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
			       ar9271Common_normal_cck_fir_coeff_9271,
			       ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
		INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
			       ar9271Common_japan_2484_cck_fir_coeff_9271,
			       ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
		INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
			       ar9271Modes_9271_1_0_only,
			       ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
		INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
			       ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
		INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
			       ar9271Modes_high_power_tx_gain_9271,
			       ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
		INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
			       ar9271Modes_normal_power_tx_gain_9271,
			       ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
		return;
	}

	/* AR9287 1.1+ */
	if (AR_SREV_9287_11_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
			       ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
			       ARRAY_SIZE(ar9287Common_9287_1_1), 2);
		if (ah->config.pcie_clock_req)
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9287PciePhy_clkreq_off_L1_9287_1_1,
			ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
		else
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
			ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
			2);
	} else if (AR_SREV_9287_10_OR_LATER(ah)) {
		/* AR9287 1.0 */
		INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
			       ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
			       ARRAY_SIZE(ar9287Common_9287_1_0), 2);

		if (ah->config.pcie_clock_req)
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9287PciePhy_clkreq_off_L1_9287_1_0,
			ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
		else
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
			ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
			2);
	} else if (AR_SREV_9285_12_OR_LATER(ah)) {
		/* AR9285 1.2+ */

		INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
			       ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
			       ARRAY_SIZE(ar9285Common_9285_1_2), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9285PciePhy_clkreq_off_L1_9285_1_2,
			ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
			ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
				  2);
		}
	} else if (AR_SREV_9285_10_OR_LATER(ah)) {
		/* AR9285 1.0/1.1 */
		INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
			       ARRAY_SIZE(ar9285Modes_9285), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
			       ARRAY_SIZE(ar9285Common_9285), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9285PciePhy_clkreq_off_L1_9285,
			ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9285PciePhy_clkreq_always_on_L1_9285,
			ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
		}
	} else if (AR_SREV_9280_20_OR_LATER(ah)) {
		/* AR9280 2.0+ (adds fast-clock mode tables) */
		INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
			       ARRAY_SIZE(ar9280Modes_9280_2), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
			       ARRAY_SIZE(ar9280Common_9280_2), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9280PciePhy_clkreq_off_L1_9280,
			ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9280PciePhy_clkreq_always_on_L1_9280,
			ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
		}
		INIT_INI_ARRAY(&ah->iniModesAdditional,
			       ar9280Modes_fast_clock_9280_2,
			       ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
	} else if (AR_SREV_9280_10_OR_LATER(ah)) {
		/* AR9280 1.0 */
		INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
			       ARRAY_SIZE(ar9280Modes_9280), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
			       ARRAY_SIZE(ar9280Common_9280), 2);
	} else if (AR_SREV_9160_10_OR_LATER(ah)) {
		/* AR9160 (external RF banks) */
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
			       ARRAY_SIZE(ar5416Modes_9160), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
			       ARRAY_SIZE(ar5416Common_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
			       ARRAY_SIZE(ar5416Bank0_9160), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
			       ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
			       ARRAY_SIZE(ar5416Bank1_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
			       ARRAY_SIZE(ar5416Bank2_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
			       ARRAY_SIZE(ar5416Bank3_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
			       ARRAY_SIZE(ar5416Bank6_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
			       ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
			       ARRAY_SIZE(ar5416Bank7_9160), 2);
		if (AR_SREV_9160_11(ah)) {
			INIT_INI_ARRAY(&ah->iniAddac,
				       ar5416Addac_91601_1,
				       ARRAY_SIZE(ar5416Addac_91601_1), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
				       ARRAY_SIZE(ar5416Addac_9160), 2);
		}
	} else if (AR_SREV_9100_OR_LATER(ah)) {
		/* AR9100 (AHB/SoC) */
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
			       ARRAY_SIZE(ar5416Modes_9100), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
			       ARRAY_SIZE(ar5416Common_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
			       ARRAY_SIZE(ar5416Bank0_9100), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
			       ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
			       ARRAY_SIZE(ar5416Bank1_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
			       ARRAY_SIZE(ar5416Bank2_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
			       ARRAY_SIZE(ar5416Bank3_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
			       ARRAY_SIZE(ar5416Bank6_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
			       ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
			       ARRAY_SIZE(ar5416Bank7_9100), 2);
		INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
			       ARRAY_SIZE(ar5416Addac_9100), 2);
	} else {
		/* Original AR5416 */
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
			       ARRAY_SIZE(ar5416Modes), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
			       ARRAY_SIZE(ar5416Common), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
			       ARRAY_SIZE(ar5416Bank0), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
			       ARRAY_SIZE(ar5416BB_RfGain), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
			       ARRAY_SIZE(ar5416Bank1), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
			       ARRAY_SIZE(ar5416Bank2), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
			       ARRAY_SIZE(ar5416Bank3), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
			       ARRAY_SIZE(ar5416Bank6), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
			       ARRAY_SIZE(ar5416Bank6TPC), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
			       ARRAY_SIZE(ar5416Bank7), 2);
		INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
			       ARRAY_SIZE(ar5416Addac), 2);
	}
}
231
232/* Support for Japan ch.14 (2484) spread */
233void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
234{
235 if (AR_SREV_9287_11_OR_LATER(ah)) {
236 INIT_INI_ARRAY(&ah->iniCckfirNormal,
237 ar9287Common_normal_cck_fir_coeff_92871_1,
238 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
239 2);
240 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
241 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
242 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
243 2);
244 }
245}
246
/*
 * Pick the AR9280 2.x rx-gain initvals table based on the EEPROM
 * rx-gain type (only present from EEPROM minor rev 17 on; older
 * EEPROMs get the original table).
 *
 * NOTE(review): the duplicated "original" branch cannot safely be
 * folded by defaulting rxgain_type, since one of the
 * AR5416_EEP_RXGAIN_* constants may be 0 - verify their values before
 * restructuring.
 */
static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
{
	u32 rxgain_type;

	if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
	    AR5416_EEP_MINOR_VER_17) {
		rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);

		if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
			INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9280Modes_backoff_13db_rxgain_9280_2,
			ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
		else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
			INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9280Modes_backoff_23db_rxgain_9280_2,
			ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
		else
			INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9280Modes_original_rxgain_9280_2,
			ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
	} else {
		INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9280Modes_original_rxgain_9280_2,
			ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
	}
}
273
274static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
275{
276 u32 txgain_type;
277
278 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
279 AR5416_EEP_MINOR_VER_19) {
280 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
281
282 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
283 INIT_INI_ARRAY(&ah->iniModesTxGain,
284 ar9280Modes_high_power_tx_gain_9280_2,
285 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
286 else
287 INIT_INI_ARRAY(&ah->iniModesTxGain,
288 ar9280Modes_original_tx_gain_9280_2,
289 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
290 } else {
291 INIT_INI_ARRAY(&ah->iniModesTxGain,
292 ar9280Modes_original_tx_gain_9280_2,
293 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
294 }
295}
296
/*
 * Select the rx and tx gain initvals tables for the detected chip.
 *
 * AR9287 revisions use fixed per-revision tables; AR9280 2.0 defers to
 * the EEPROM-driven helpers above; AR9285 1.2+ consults the EEPROM tx
 * gain type directly.  Chips matching none of the checks keep whatever
 * was loaded during mode-regs init.
 */
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
{
	/* rx gain table selection */
	if (AR_SREV_9287_11_OR_LATER(ah))
		INIT_INI_ARRAY(&ah->iniModesRxGain,
		ar9287Modes_rx_gain_9287_1_1,
		ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
	else if (AR_SREV_9287_10(ah))
		INIT_INI_ARRAY(&ah->iniModesRxGain,
		ar9287Modes_rx_gain_9287_1_0,
		ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
	else if (AR_SREV_9280_20(ah))
		ar9280_20_hw_init_rxgain_ini(ah);

	/* tx gain table selection */
	if (AR_SREV_9287_11_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModesTxGain,
		ar9287Modes_tx_gain_9287_1_1,
		ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
	} else if (AR_SREV_9287_10(ah)) {
		INIT_INI_ARRAY(&ah->iniModesTxGain,
		ar9287Modes_tx_gain_9287_1_0,
		ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
	} else if (AR_SREV_9280_20(ah)) {
		ar9280_20_hw_init_txgain_ini(ah);
	} else if (AR_SREV_9285_12_OR_LATER(ah)) {
		u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);

		/* txgain table: XE 2.0 boards get their own variants */
		if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
			if (AR_SREV_9285E_20(ah)) {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
				ar9285Modes_XE2_0_high_power,
				ARRAY_SIZE(
				  ar9285Modes_XE2_0_high_power), 6);
			} else {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
				ar9285Modes_high_power_tx_gain_9285_1_2,
				ARRAY_SIZE(
				  ar9285Modes_high_power_tx_gain_9285_1_2), 6);
			}
		} else {
			if (AR_SREV_9285E_20(ah)) {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
				ar9285Modes_XE2_0_normal_power,
				ARRAY_SIZE(
				  ar9285Modes_XE2_0_normal_power), 6);
			} else {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
				ar9285Modes_original_tx_gain_9285_1_2,
				ARRAY_SIZE(
				  ar9285Modes_original_tx_gain_9285_1_2), 6);
			}
		}
	}
}
351
/*
 * Helper for ASPM support.
 *
 * Disable PLL when in L0s as well as receiver clock when in L1.
 * This power saving option must be enabled through the SerDes.
 *
 * Programming the SerDes must go through the same 288 bit serial shift
 * register as the other analog registers. Hence the 9 writes.
 *
 * @restore: non-zero when restoring after sleep — the SerDes program
 *           is skipped and only the D3/L1 workaround bit is handled
 * @power_off: non-zero when the device is about to enter D3; controls
 *             whether AR_WA_D3_L1_DISABLE is kept set in AR_WA
 */
static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
					 int restore,
					 int power_off)
{
	u8 i;
	u32 val;

	/* Only PCI Express parts have a SerDes to program */
	if (ah->is_pciexpress != true)
		return;

	/* Do not touch SerDes registers */
	if (ah->config.pcie_powersave_enable == 2)
		return;

	/* Nothing to do on restore for 11N */
	if (!restore) {
		if (AR_SREV_9280_20_OR_LATER(ah)) {
			/*
			 * AR9280 2.0 or later chips use SerDes values from the
			 * initvals.h initialized depending on chipset during
			 * __ath9k_hw_init()
			 */
			for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
				REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
					  INI_RA(&ah->iniPcieSerdes, i, 1));
			}
		} else if (AR_SREV_9280(ah) &&
			   (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
			/* AR9280 1.0: hardcoded SerDes shift-register program */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);

			/* RX shut off when elecidle is asserted */
			REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);

			/* Shut off CLKREQ active in L1 */
			if (ah->config.pcie_clock_req)
				REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
			else
				REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);

			/* Load the new settings */
			REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

		} else {
			/* Pre-AR9280 11n parts: a slightly different program */
			ENABLE_REGWRITE_BUFFER(ah);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);

			/* RX shut off when elecidle is asserted */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);

			/*
			 * Ignore ah->ah_config.pcie_clock_req setting for
			 * pre-AR9280 11n
			 */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);

			/* Load the new settings */
			REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

			REGWRITE_BUFFER_FLUSH(ah);
			DISABLE_REGWRITE_BUFFER(ah);
		}

		udelay(1000);

		/* set bit 19 to allow forcing of pcie core into L1 state */
		REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);

		/* Several PCIe massages to ensure proper behaviour */
		if (ah->config.pcie_waen) {
			/* User/platform supplied workaround value wins */
			val = ah->config.pcie_waen;
			if (!power_off)
				val &= (~AR_WA_D3_L1_DISABLE);
		} else {
			if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
			    AR_SREV_9287(ah)) {
				val = AR9285_WA_DEFAULT;
				if (!power_off)
					val &= (~AR_WA_D3_L1_DISABLE);
			} else if (AR_SREV_9280(ah)) {
				/*
				 * On AR9280 chips bit 22 of 0x4004 needs to be
				 * set otherwise card may disappear.
				 */
				val = AR9280_WA_DEFAULT;
				if (!power_off)
					val &= (~AR_WA_D3_L1_DISABLE);
			} else
				val = AR_WA_DEFAULT;
		}

		REG_WRITE(ah, AR_WA, val);
	}

	if (power_off) {
		/*
		 * Set PCIe workaround bits
		 * bit 14 in WA register (disable L1) should only
		 * be set when device enters D3 and be cleared
		 * when device comes back to D0.
		 */
		if (ah->config.pcie_waen) {
			if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
		} else {
			/* Apply the per-chip default only where the default
			 * workaround value actually carries the D3/L1 bit */
			if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
			      AR_SREV_9287(ah)) &&
			     (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
			    (AR_SREV_9280(ah) &&
			     (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
			}
		}
	}
}
490
/*
 * Read the analog radio revision ID.
 *
 * The write sequence to AR_PHY(0x36)/AR_PHY(0x20) presumably clocks the
 * revision out of the radio's serial interface (hardware-defined
 * protocol — not visible here).  The byte read back from AR_PHY(256)
 * arrives in reversed order, so the nibbles are swapped and the result
 * bit-reversed before returning.
 */
static int ar9002_hw_get_radiorev(struct ath_hw *ah)
{
	u32 val;
	int i;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
	for (i = 0; i < 8; i++)
		REG_WRITE(ah, AR_PHY(0x20), 0x00010000);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);

	/* Extract the top byte, swap its nibbles, then reverse all 8 bits */
	val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
	val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);

	return ath9k_hw_reverse_bits(val, 8);
}
510
511int ar9002_hw_rf_claim(struct ath_hw *ah)
512{
513 u32 val;
514
515 REG_WRITE(ah, AR_PHY(0), 0x00000007);
516
517 val = ar9002_hw_get_radiorev(ah);
518 switch (val & AR_RADIO_SREV_MAJOR) {
519 case 0:
520 val = AR_RAD5133_SREV_MAJOR;
521 break;
522 case AR_RAD5133_SREV_MAJOR:
523 case AR_RAD5122_SREV_MAJOR:
524 case AR_RAD2133_SREV_MAJOR:
525 case AR_RAD2122_SREV_MAJOR:
526 break;
527 default:
528 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
529 "Radio Chip Rev 0x%02X not supported\n",
530 val & AR_RADIO_SREV_MAJOR);
531 return -EOPNOTSUPP;
532 }
533
534 ah->hw_version.analog5GhzRev = val;
535
536 return 0;
537}
538
539/*
540 * Enable ASYNC FIFO
541 *
542 * If Async FIFO is enabled, the following counters change as MAC now runs
543 * at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
544 *
545 * The values below tested for ht40 2 chain.
546 * Overwrite the delay/timeouts initialized in process ini.
547 */
548void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
549{
550 if (AR_SREV_9287_12_OR_LATER(ah)) {
551 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
552 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
553 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
554 AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
555 REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
556 AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
557
558 REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
559 REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
560
561 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
562 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
563 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
564 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
565 }
566}
567
568/*
569 * We don't enable WEP aggregation on mac80211 but we keep this
570 * around for HAL unification purposes.
571 */
572void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
573{
574 if (AR_SREV_9287_12_OR_LATER(ah)) {
575 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
576 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
577 }
578}
579
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
void ar9002_hw_attach_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
	priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
	priv_ops->macversion_supported = ar9002_hw_macversion_supported;

	ops->config_pci_powersave = ar9002_hw_configpcipowersave;

	/*
	 * Attach the baseline AR5008 PHY ops first; AR9280 (1.0) and
	 * later then install their overrides on top.
	 */
	ar5008_hw_attach_phy_ops(ah);
	if (AR_SREV_9280_10_OR_LATER(ah))
		ar9002_hw_attach_phy_ops(ah);

	ar9002_hw_attach_calib_ops(ah);
	ar9002_hw_attach_mac_ops(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 455e9d3b3f13..dae7f3304eb8 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17static const u32 ar5416Modes[][6] = { 17#ifndef INITVALS_9002_10_H
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 18#define INITVALS_9002_10_H
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
20 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
25 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
26 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
27 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
28 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
29 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
30 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
31 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
32 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
33 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
35 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
36 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
37 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
38 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
39 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
40 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
41 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
42 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
43 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
44 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
45 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
46 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
47 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
49 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
50 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
51 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
52 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
53 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
54 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
55 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
56 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
57 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
58 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
59 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
60 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
61 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
62 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
63 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
64 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
65 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
66 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
67 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
68 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
69 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
70 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
71 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
72 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
73 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
74 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
75 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
76 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
77 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
78 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
79 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
80 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
81};
82
83static const u32 ar5416Common[][2] = {
84 { 0x0000000c, 0x00000000 },
85 { 0x00000030, 0x00020015 },
86 { 0x00000034, 0x00000005 },
87 { 0x00000040, 0x00000000 },
88 { 0x00000044, 0x00000008 },
89 { 0x00000048, 0x00000008 },
90 { 0x0000004c, 0x00000010 },
91 { 0x00000050, 0x00000000 },
92 { 0x00000054, 0x0000001f },
93 { 0x00000800, 0x00000000 },
94 { 0x00000804, 0x00000000 },
95 { 0x00000808, 0x00000000 },
96 { 0x0000080c, 0x00000000 },
97 { 0x00000810, 0x00000000 },
98 { 0x00000814, 0x00000000 },
99 { 0x00000818, 0x00000000 },
100 { 0x0000081c, 0x00000000 },
101 { 0x00000820, 0x00000000 },
102 { 0x00000824, 0x00000000 },
103 { 0x00001040, 0x002ffc0f },
104 { 0x00001044, 0x002ffc0f },
105 { 0x00001048, 0x002ffc0f },
106 { 0x0000104c, 0x002ffc0f },
107 { 0x00001050, 0x002ffc0f },
108 { 0x00001054, 0x002ffc0f },
109 { 0x00001058, 0x002ffc0f },
110 { 0x0000105c, 0x002ffc0f },
111 { 0x00001060, 0x002ffc0f },
112 { 0x00001064, 0x002ffc0f },
113 { 0x00001230, 0x00000000 },
114 { 0x00001270, 0x00000000 },
115 { 0x00001038, 0x00000000 },
116 { 0x00001078, 0x00000000 },
117 { 0x000010b8, 0x00000000 },
118 { 0x000010f8, 0x00000000 },
119 { 0x00001138, 0x00000000 },
120 { 0x00001178, 0x00000000 },
121 { 0x000011b8, 0x00000000 },
122 { 0x000011f8, 0x00000000 },
123 { 0x00001238, 0x00000000 },
124 { 0x00001278, 0x00000000 },
125 { 0x000012b8, 0x00000000 },
126 { 0x000012f8, 0x00000000 },
127 { 0x00001338, 0x00000000 },
128 { 0x00001378, 0x00000000 },
129 { 0x000013b8, 0x00000000 },
130 { 0x000013f8, 0x00000000 },
131 { 0x00001438, 0x00000000 },
132 { 0x00001478, 0x00000000 },
133 { 0x000014b8, 0x00000000 },
134 { 0x000014f8, 0x00000000 },
135 { 0x00001538, 0x00000000 },
136 { 0x00001578, 0x00000000 },
137 { 0x000015b8, 0x00000000 },
138 { 0x000015f8, 0x00000000 },
139 { 0x00001638, 0x00000000 },
140 { 0x00001678, 0x00000000 },
141 { 0x000016b8, 0x00000000 },
142 { 0x000016f8, 0x00000000 },
143 { 0x00001738, 0x00000000 },
144 { 0x00001778, 0x00000000 },
145 { 0x000017b8, 0x00000000 },
146 { 0x000017f8, 0x00000000 },
147 { 0x0000103c, 0x00000000 },
148 { 0x0000107c, 0x00000000 },
149 { 0x000010bc, 0x00000000 },
150 { 0x000010fc, 0x00000000 },
151 { 0x0000113c, 0x00000000 },
152 { 0x0000117c, 0x00000000 },
153 { 0x000011bc, 0x00000000 },
154 { 0x000011fc, 0x00000000 },
155 { 0x0000123c, 0x00000000 },
156 { 0x0000127c, 0x00000000 },
157 { 0x000012bc, 0x00000000 },
158 { 0x000012fc, 0x00000000 },
159 { 0x0000133c, 0x00000000 },
160 { 0x0000137c, 0x00000000 },
161 { 0x000013bc, 0x00000000 },
162 { 0x000013fc, 0x00000000 },
163 { 0x0000143c, 0x00000000 },
164 { 0x0000147c, 0x00000000 },
165 { 0x00004030, 0x00000002 },
166 { 0x0000403c, 0x00000002 },
167 { 0x00007010, 0x00000000 },
168 { 0x00007038, 0x000004c2 },
169 { 0x00008004, 0x00000000 },
170 { 0x00008008, 0x00000000 },
171 { 0x0000800c, 0x00000000 },
172 { 0x00008018, 0x00000700 },
173 { 0x00008020, 0x00000000 },
174 { 0x00008038, 0x00000000 },
175 { 0x0000803c, 0x00000000 },
176 { 0x00008048, 0x40000000 },
177 { 0x00008054, 0x00000000 },
178 { 0x00008058, 0x00000000 },
179 { 0x0000805c, 0x000fc78f },
180 { 0x00008060, 0x0000000f },
181 { 0x00008064, 0x00000000 },
182 { 0x000080c0, 0x2a82301a },
183 { 0x000080c4, 0x05dc01e0 },
184 { 0x000080c8, 0x1f402710 },
185 { 0x000080cc, 0x01f40000 },
186 { 0x000080d0, 0x00001e00 },
187 { 0x000080d4, 0x00000000 },
188 { 0x000080d8, 0x00400000 },
189 { 0x000080e0, 0xffffffff },
190 { 0x000080e4, 0x0000ffff },
191 { 0x000080e8, 0x003f3f3f },
192 { 0x000080ec, 0x00000000 },
193 { 0x000080f0, 0x00000000 },
194 { 0x000080f4, 0x00000000 },
195 { 0x000080f8, 0x00000000 },
196 { 0x000080fc, 0x00020000 },
197 { 0x00008100, 0x00020000 },
198 { 0x00008104, 0x00000001 },
199 { 0x00008108, 0x00000052 },
200 { 0x0000810c, 0x00000000 },
201 { 0x00008110, 0x00000168 },
202 { 0x00008118, 0x000100aa },
203 { 0x0000811c, 0x00003210 },
204 { 0x00008124, 0x00000000 },
205 { 0x00008128, 0x00000000 },
206 { 0x0000812c, 0x00000000 },
207 { 0x00008130, 0x00000000 },
208 { 0x00008134, 0x00000000 },
209 { 0x00008138, 0x00000000 },
210 { 0x0000813c, 0x00000000 },
211 { 0x00008144, 0xffffffff },
212 { 0x00008168, 0x00000000 },
213 { 0x0000816c, 0x00000000 },
214 { 0x00008170, 0x32143320 },
215 { 0x00008174, 0xfaa4fa50 },
216 { 0x00008178, 0x00000100 },
217 { 0x0000817c, 0x00000000 },
218 { 0x000081c4, 0x00000000 },
219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 },
222 { 0x000081f8, 0x00000000 },
223 { 0x000081fc, 0x00000000 },
224 { 0x00008200, 0x00000000 },
225 { 0x00008204, 0x00000000 },
226 { 0x00008208, 0x00000000 },
227 { 0x0000820c, 0x00000000 },
228 { 0x00008210, 0x00000000 },
229 { 0x00008214, 0x00000000 },
230 { 0x00008218, 0x00000000 },
231 { 0x0000821c, 0x00000000 },
232 { 0x00008220, 0x00000000 },
233 { 0x00008224, 0x00000000 },
234 { 0x00008228, 0x00000000 },
235 { 0x0000822c, 0x00000000 },
236 { 0x00008230, 0x00000000 },
237 { 0x00008234, 0x00000000 },
238 { 0x00008238, 0x00000000 },
239 { 0x0000823c, 0x00000000 },
240 { 0x00008240, 0x00100000 },
241 { 0x00008244, 0x0010f400 },
242 { 0x00008248, 0x00000100 },
243 { 0x0000824c, 0x0001e800 },
244 { 0x00008250, 0x00000000 },
245 { 0x00008254, 0x00000000 },
246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 },
249 { 0x00008264, 0xa8000010 },
250 { 0x00008270, 0x00000000 },
251 { 0x00008274, 0x40000000 },
252 { 0x00008278, 0x003e4180 },
253 { 0x0000827c, 0x00000000 },
254 { 0x00008284, 0x0000002c },
255 { 0x00008288, 0x0000002c },
256 { 0x0000828c, 0x00000000 },
257 { 0x00008294, 0x00000000 },
258 { 0x00008298, 0x00000000 },
259 { 0x00008300, 0x00000000 },
260 { 0x00008304, 0x00000000 },
261 { 0x00008308, 0x00000000 },
262 { 0x0000830c, 0x00000000 },
263 { 0x00008310, 0x00000000 },
264 { 0x00008314, 0x00000000 },
265 { 0x00008318, 0x00000000 },
266 { 0x00008328, 0x00000000 },
267 { 0x0000832c, 0x00000007 },
268 { 0x00008330, 0x00000302 },
269 { 0x00008334, 0x00000e00 },
270 { 0x00008338, 0x00070000 },
271 { 0x0000833c, 0x00000000 },
272 { 0x00008340, 0x000107ff },
273 { 0x00009808, 0x00000000 },
274 { 0x0000980c, 0xad848e19 },
275 { 0x00009810, 0x7d14e000 },
276 { 0x00009814, 0x9c0a9f6b },
277 { 0x0000981c, 0x00000000 },
278 { 0x0000982c, 0x0000a000 },
279 { 0x00009830, 0x00000000 },
280 { 0x0000983c, 0x00200400 },
281 { 0x00009840, 0x206a002e },
282 { 0x0000984c, 0x1284233c },
283 { 0x00009854, 0x00000859 },
284 { 0x00009900, 0x00000000 },
285 { 0x00009904, 0x00000000 },
286 { 0x00009908, 0x00000000 },
287 { 0x0000990c, 0x00000000 },
288 { 0x0000991c, 0x10000fff },
289 { 0x00009920, 0x05100000 },
290 { 0x0000a920, 0x05100000 },
291 { 0x0000b920, 0x05100000 },
292 { 0x00009928, 0x00000001 },
293 { 0x0000992c, 0x00000004 },
294 { 0x00009934, 0x1e1f2022 },
295 { 0x00009938, 0x0a0b0c0d },
296 { 0x0000993c, 0x00000000 },
297 { 0x00009948, 0x9280b212 },
298 { 0x0000994c, 0x00020028 },
299 { 0x00009954, 0x5d50e188 },
300 { 0x00009958, 0x00081fff },
301 { 0x0000c95c, 0x004b6a8e },
302 { 0x0000c968, 0x000003ce },
303 { 0x00009970, 0x190fb515 },
304 { 0x00009974, 0x00000000 },
305 { 0x00009978, 0x00000001 },
306 { 0x0000997c, 0x00000000 },
307 { 0x00009980, 0x00000000 },
308 { 0x00009984, 0x00000000 },
309 { 0x00009988, 0x00000000 },
310 { 0x0000998c, 0x00000000 },
311 { 0x00009990, 0x00000000 },
312 { 0x00009994, 0x00000000 },
313 { 0x00009998, 0x00000000 },
314 { 0x0000999c, 0x00000000 },
315 { 0x000099a0, 0x00000000 },
316 { 0x000099a4, 0x00000001 },
317 { 0x000099a8, 0x001fff00 },
318 { 0x000099ac, 0x00000000 },
319 { 0x000099b0, 0x03051000 },
320 { 0x000099dc, 0x00000000 },
321 { 0x000099e0, 0x00000200 },
322 { 0x000099e4, 0xaaaaaaaa },
323 { 0x000099e8, 0x3c466478 },
324 { 0x000099ec, 0x000000aa },
325 { 0x000099fc, 0x00001042 },
326 { 0x00009b00, 0x00000000 },
327 { 0x00009b04, 0x00000001 },
328 { 0x00009b08, 0x00000002 },
329 { 0x00009b0c, 0x00000003 },
330 { 0x00009b10, 0x00000004 },
331 { 0x00009b14, 0x00000005 },
332 { 0x00009b18, 0x00000008 },
333 { 0x00009b1c, 0x00000009 },
334 { 0x00009b20, 0x0000000a },
335 { 0x00009b24, 0x0000000b },
336 { 0x00009b28, 0x0000000c },
337 { 0x00009b2c, 0x0000000d },
338 { 0x00009b30, 0x00000010 },
339 { 0x00009b34, 0x00000011 },
340 { 0x00009b38, 0x00000012 },
341 { 0x00009b3c, 0x00000013 },
342 { 0x00009b40, 0x00000014 },
343 { 0x00009b44, 0x00000015 },
344 { 0x00009b48, 0x00000018 },
345 { 0x00009b4c, 0x00000019 },
346 { 0x00009b50, 0x0000001a },
347 { 0x00009b54, 0x0000001b },
348 { 0x00009b58, 0x0000001c },
349 { 0x00009b5c, 0x0000001d },
350 { 0x00009b60, 0x00000020 },
351 { 0x00009b64, 0x00000021 },
352 { 0x00009b68, 0x00000022 },
353 { 0x00009b6c, 0x00000023 },
354 { 0x00009b70, 0x00000024 },
355 { 0x00009b74, 0x00000025 },
356 { 0x00009b78, 0x00000028 },
357 { 0x00009b7c, 0x00000029 },
358 { 0x00009b80, 0x0000002a },
359 { 0x00009b84, 0x0000002b },
360 { 0x00009b88, 0x0000002c },
361 { 0x00009b8c, 0x0000002d },
362 { 0x00009b90, 0x00000030 },
363 { 0x00009b94, 0x00000031 },
364 { 0x00009b98, 0x00000032 },
365 { 0x00009b9c, 0x00000033 },
366 { 0x00009ba0, 0x00000034 },
367 { 0x00009ba4, 0x00000035 },
368 { 0x00009ba8, 0x00000035 },
369 { 0x00009bac, 0x00000035 },
370 { 0x00009bb0, 0x00000035 },
371 { 0x00009bb4, 0x00000035 },
372 { 0x00009bb8, 0x00000035 },
373 { 0x00009bbc, 0x00000035 },
374 { 0x00009bc0, 0x00000035 },
375 { 0x00009bc4, 0x00000035 },
376 { 0x00009bc8, 0x00000035 },
377 { 0x00009bcc, 0x00000035 },
378 { 0x00009bd0, 0x00000035 },
379 { 0x00009bd4, 0x00000035 },
380 { 0x00009bd8, 0x00000035 },
381 { 0x00009bdc, 0x00000035 },
382 { 0x00009be0, 0x00000035 },
383 { 0x00009be4, 0x00000035 },
384 { 0x00009be8, 0x00000035 },
385 { 0x00009bec, 0x00000035 },
386 { 0x00009bf0, 0x00000035 },
387 { 0x00009bf4, 0x00000035 },
388 { 0x00009bf8, 0x00000010 },
389 { 0x00009bfc, 0x0000001a },
390 { 0x0000a210, 0x40806333 },
391 { 0x0000a214, 0x00106c10 },
392 { 0x0000a218, 0x009c4060 },
393 { 0x0000a220, 0x018830c6 },
394 { 0x0000a224, 0x00000400 },
395 { 0x0000a228, 0x00000bb5 },
396 { 0x0000a22c, 0x00000011 },
397 { 0x0000a234, 0x20202020 },
398 { 0x0000a238, 0x20202020 },
399 { 0x0000a23c, 0x13c889af },
400 { 0x0000a240, 0x38490a20 },
401 { 0x0000a244, 0x00007bb6 },
402 { 0x0000a248, 0x0fff3ffc },
403 { 0x0000a24c, 0x00000001 },
404 { 0x0000a250, 0x0000a000 },
405 { 0x0000a254, 0x00000000 },
406 { 0x0000a258, 0x0cc75380 },
407 { 0x0000a25c, 0x0f0f0f01 },
408 { 0x0000a260, 0xdfa91f01 },
409 { 0x0000a268, 0x00000000 },
410 { 0x0000a26c, 0x0e79e5c6 },
411 { 0x0000b26c, 0x0e79e5c6 },
412 { 0x0000c26c, 0x0e79e5c6 },
413 { 0x0000d270, 0x00820820 },
414 { 0x0000a278, 0x1ce739ce },
415 { 0x0000a27c, 0x051701ce },
416 { 0x0000a338, 0x00000000 },
417 { 0x0000a33c, 0x00000000 },
418 { 0x0000a340, 0x00000000 },
419 { 0x0000a344, 0x00000000 },
420 { 0x0000a348, 0x3fffffff },
421 { 0x0000a34c, 0x3fffffff },
422 { 0x0000a350, 0x3fffffff },
423 { 0x0000a354, 0x0003ffff },
424 { 0x0000a358, 0x79a8aa1f },
425 { 0x0000d35c, 0x07ffffef },
426 { 0x0000d360, 0x0fffffe7 },
427 { 0x0000d364, 0x17ffffe5 },
428 { 0x0000d368, 0x1fffffe4 },
429 { 0x0000d36c, 0x37ffffe3 },
430 { 0x0000d370, 0x3fffffe3 },
431 { 0x0000d374, 0x57ffffe3 },
432 { 0x0000d378, 0x5fffffe2 },
433 { 0x0000d37c, 0x7fffffe2 },
434 { 0x0000d380, 0x7f3c7bba },
435 { 0x0000d384, 0xf3307ff0 },
436 { 0x0000a388, 0x08000000 },
437 { 0x0000a38c, 0x20202020 },
438 { 0x0000a390, 0x20202020 },
439 { 0x0000a394, 0x1ce739ce },
440 { 0x0000a398, 0x000001ce },
441 { 0x0000a39c, 0x00000001 },
442 { 0x0000a3a0, 0x00000000 },
443 { 0x0000a3a4, 0x00000000 },
444 { 0x0000a3a8, 0x00000000 },
445 { 0x0000a3ac, 0x00000000 },
446 { 0x0000a3b0, 0x00000000 },
447 { 0x0000a3b4, 0x00000000 },
448 { 0x0000a3b8, 0x00000000 },
449 { 0x0000a3bc, 0x00000000 },
450 { 0x0000a3c0, 0x00000000 },
451 { 0x0000a3c4, 0x00000000 },
452 { 0x0000a3c8, 0x00000246 },
453 { 0x0000a3cc, 0x20202020 },
454 { 0x0000a3d0, 0x20202020 },
455 { 0x0000a3d4, 0x20202020 },
456 { 0x0000a3dc, 0x1ce739ce },
457 { 0x0000a3e0, 0x000001ce },
458};
459
460static const u32 ar5416Bank0[][2] = {
461 { 0x000098b0, 0x1e5795e5 },
462 { 0x000098e0, 0x02008020 },
463};
464
465static const u32 ar5416BB_RfGain[][3] = {
466 { 0x00009a00, 0x00000000, 0x00000000 },
467 { 0x00009a04, 0x00000040, 0x00000040 },
468 { 0x00009a08, 0x00000080, 0x00000080 },
469 { 0x00009a0c, 0x000001a1, 0x00000141 },
470 { 0x00009a10, 0x000001e1, 0x00000181 },
471 { 0x00009a14, 0x00000021, 0x000001c1 },
472 { 0x00009a18, 0x00000061, 0x00000001 },
473 { 0x00009a1c, 0x00000168, 0x00000041 },
474 { 0x00009a20, 0x000001a8, 0x000001a8 },
475 { 0x00009a24, 0x000001e8, 0x000001e8 },
476 { 0x00009a28, 0x00000028, 0x00000028 },
477 { 0x00009a2c, 0x00000068, 0x00000068 },
478 { 0x00009a30, 0x00000189, 0x000000a8 },
479 { 0x00009a34, 0x000001c9, 0x00000169 },
480 { 0x00009a38, 0x00000009, 0x000001a9 },
481 { 0x00009a3c, 0x00000049, 0x000001e9 },
482 { 0x00009a40, 0x00000089, 0x00000029 },
483 { 0x00009a44, 0x00000170, 0x00000069 },
484 { 0x00009a48, 0x000001b0, 0x00000190 },
485 { 0x00009a4c, 0x000001f0, 0x000001d0 },
486 { 0x00009a50, 0x00000030, 0x00000010 },
487 { 0x00009a54, 0x00000070, 0x00000050 },
488 { 0x00009a58, 0x00000191, 0x00000090 },
489 { 0x00009a5c, 0x000001d1, 0x00000151 },
490 { 0x00009a60, 0x00000011, 0x00000191 },
491 { 0x00009a64, 0x00000051, 0x000001d1 },
492 { 0x00009a68, 0x00000091, 0x00000011 },
493 { 0x00009a6c, 0x000001b8, 0x00000051 },
494 { 0x00009a70, 0x000001f8, 0x00000198 },
495 { 0x00009a74, 0x00000038, 0x000001d8 },
496 { 0x00009a78, 0x00000078, 0x00000018 },
497 { 0x00009a7c, 0x00000199, 0x00000058 },
498 { 0x00009a80, 0x000001d9, 0x00000098 },
499 { 0x00009a84, 0x00000019, 0x00000159 },
500 { 0x00009a88, 0x00000059, 0x00000199 },
501 { 0x00009a8c, 0x00000099, 0x000001d9 },
502 { 0x00009a90, 0x000000d9, 0x00000019 },
503 { 0x00009a94, 0x000000f9, 0x00000059 },
504 { 0x00009a98, 0x000000f9, 0x00000099 },
505 { 0x00009a9c, 0x000000f9, 0x000000d9 },
506 { 0x00009aa0, 0x000000f9, 0x000000f9 },
507 { 0x00009aa4, 0x000000f9, 0x000000f9 },
508 { 0x00009aa8, 0x000000f9, 0x000000f9 },
509 { 0x00009aac, 0x000000f9, 0x000000f9 },
510 { 0x00009ab0, 0x000000f9, 0x000000f9 },
511 { 0x00009ab4, 0x000000f9, 0x000000f9 },
512 { 0x00009ab8, 0x000000f9, 0x000000f9 },
513 { 0x00009abc, 0x000000f9, 0x000000f9 },
514 { 0x00009ac0, 0x000000f9, 0x000000f9 },
515 { 0x00009ac4, 0x000000f9, 0x000000f9 },
516 { 0x00009ac8, 0x000000f9, 0x000000f9 },
517 { 0x00009acc, 0x000000f9, 0x000000f9 },
518 { 0x00009ad0, 0x000000f9, 0x000000f9 },
519 { 0x00009ad4, 0x000000f9, 0x000000f9 },
520 { 0x00009ad8, 0x000000f9, 0x000000f9 },
521 { 0x00009adc, 0x000000f9, 0x000000f9 },
522 { 0x00009ae0, 0x000000f9, 0x000000f9 },
523 { 0x00009ae4, 0x000000f9, 0x000000f9 },
524 { 0x00009ae8, 0x000000f9, 0x000000f9 },
525 { 0x00009aec, 0x000000f9, 0x000000f9 },
526 { 0x00009af0, 0x000000f9, 0x000000f9 },
527 { 0x00009af4, 0x000000f9, 0x000000f9 },
528 { 0x00009af8, 0x000000f9, 0x000000f9 },
529 { 0x00009afc, 0x000000f9, 0x000000f9 },
530};
531
532static const u32 ar5416Bank1[][2] = {
533 { 0x000098b0, 0x02108421 },
534 { 0x000098ec, 0x00000008 },
535};
536
537static const u32 ar5416Bank2[][2] = {
538 { 0x000098b0, 0x0e73ff17 },
539 { 0x000098e0, 0x00000420 },
540};
541
542static const u32 ar5416Bank3[][3] = {
543 { 0x000098f0, 0x01400018, 0x01c00018 },
544};
545
/*
 * RF bank 6 initial values.
 * All rows but the last write the serial shift register at 0x989c; the
 * final write to 0x98d0 presumably latches the shifted data into the RF
 * chip — confirm against the bank-write helper in the driver.
 * Columns: { register offset, value A, value B } (two mode variants).
 */
static const u32 ar5416Bank6[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x004210a2, 0x004210a2 },
	{ 0x0000989c, 0x0014008f, 0x0014008f },
	{ 0x0000989c, 0x00c40003, 0x00c40003 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000f1, 0x000000f1 },
	{ 0x0000989c, 0x00002081, 0x00002081 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
582
/*
 * RF bank 6 initial values, TPC (transmit power control) variant.
 * Identical shape to ar5416Bank6: a sequence of 0x989c shift-register
 * writes terminated by a latching 0x98d0 write (presumed — confirm in
 * the bank-write code). Differs from ar5416Bank6 only in a handful of
 * rows (0x00423022 / 0x201400df / 0x00c40002 / 0xe1 / 0x7081).
 */
static const u32 ar5416Bank6TPC[][3] = {
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x00423022, 0x00423022 },
	{ 0x0000989c, 0x201400df, 0x201400df },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000e1, 0x000000e1 },
	{ 0x0000989c, 0x00007081, 0x00007081 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
618
/*
 * RF bank 7 initial values: two 0x989c data writes followed by a write
 * to 0x98cc (presumably the latch for this bank — confirm in consumer).
 */
static const u32 ar5416Bank7[][2] = {
	{ 0x0000989c, 0x00000500 },
	{ 0x0000989c, 0x00000800 },
	{ 0x000098cc, 0x0000000e },
};
624
/*
 * ADDAC (analog/digital converter block) initial values for AR5416.
 * A fixed-length stream of 0x989c writes (mostly zero, with a few
 * sparse non-zero words) terminated by a 0x98cc write.
 */
static const u32 ar5416Addac[][2] = {
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000003 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x0000000c },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000030 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000060 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000058 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x000098cc, 0x00000000 },
};
664
/*
 * Mode-dependent register initial values for AR9100.
 * Columns: { register offset, then one value per channel/bandwidth mode }.
 * NOTE(review): which of the five value columns maps to which mode
 * (e.g. 11a/11g, HT20/HT40) is decided by the code that indexes this
 * table — confirm there before editing any column.
 * The TB243 conditional selects alternate values for the 0x9960/0xa960/
 * 0xb960/0x9964 registers on that board revision.
 */
static const u32 ar5416Modes_9100[][6] = {
	{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
	{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
	{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
	{ 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
	{ 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
	{ 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
	{ 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
	{ 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
	{ 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
	{ 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
	{ 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
	{ 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
	{ 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
	{ 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
	{ 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
	{ 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
	{ 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
	{ 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
	{ 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
	{ 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
	{ 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
	{ 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
	{ 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
	{ 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
	{ 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
#ifdef TB243
	{ 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
#else
	{ 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
#endif
	{ 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
	{ 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
	{ 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
	{ 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
	{ 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
	{ 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
	{ 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
	{ 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
	{ 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
	{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
	{ 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
	{ 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
	{ 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
	{ 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
	{ 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
	{ 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
	{ 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
	{ 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
	{ 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
	{ 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
	{ 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
	{ 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
738
/*
 * Mode-independent register initial values for AR9100.
 * Each row: { register offset, value }. Written once during hardware
 * reset, before the mode-dependent ar5416Modes_9100 values are applied
 * (presumed from the Common/Modes naming — confirm in the reset path).
 */
static const u32 ar5416Common_9100[][2] = {
	{ 0x0000000c, 0x00000000 },
	{ 0x00000030, 0x00020015 },
	{ 0x00000034, 0x00000005 },
	{ 0x00000040, 0x00000000 },
	{ 0x00000044, 0x00000008 },
	{ 0x00000048, 0x00000008 },
	{ 0x0000004c, 0x00000010 },
	{ 0x00000050, 0x00000000 },
	{ 0x00000054, 0x0000001f },
	{ 0x00000800, 0x00000000 },
	{ 0x00000804, 0x00000000 },
	{ 0x00000808, 0x00000000 },
	{ 0x0000080c, 0x00000000 },
	{ 0x00000810, 0x00000000 },
	{ 0x00000814, 0x00000000 },
	{ 0x00000818, 0x00000000 },
	{ 0x0000081c, 0x00000000 },
	{ 0x00000820, 0x00000000 },
	{ 0x00000824, 0x00000000 },
	{ 0x00001040, 0x002ffc0f },
	{ 0x00001044, 0x002ffc0f },
	{ 0x00001048, 0x002ffc0f },
	{ 0x0000104c, 0x002ffc0f },
	{ 0x00001050, 0x002ffc0f },
	{ 0x00001054, 0x002ffc0f },
	{ 0x00001058, 0x002ffc0f },
	{ 0x0000105c, 0x002ffc0f },
	{ 0x00001060, 0x002ffc0f },
	{ 0x00001064, 0x002ffc0f },
	{ 0x00001230, 0x00000000 },
	{ 0x00001270, 0x00000000 },
	{ 0x00001038, 0x00000000 },
	{ 0x00001078, 0x00000000 },
	{ 0x000010b8, 0x00000000 },
	{ 0x000010f8, 0x00000000 },
	{ 0x00001138, 0x00000000 },
	{ 0x00001178, 0x00000000 },
	{ 0x000011b8, 0x00000000 },
	{ 0x000011f8, 0x00000000 },
	{ 0x00001238, 0x00000000 },
	{ 0x00001278, 0x00000000 },
	{ 0x000012b8, 0x00000000 },
	{ 0x000012f8, 0x00000000 },
	{ 0x00001338, 0x00000000 },
	{ 0x00001378, 0x00000000 },
	{ 0x000013b8, 0x00000000 },
	{ 0x000013f8, 0x00000000 },
	{ 0x00001438, 0x00000000 },
	{ 0x00001478, 0x00000000 },
	{ 0x000014b8, 0x00000000 },
	{ 0x000014f8, 0x00000000 },
	{ 0x00001538, 0x00000000 },
	{ 0x00001578, 0x00000000 },
	{ 0x000015b8, 0x00000000 },
	{ 0x000015f8, 0x00000000 },
	{ 0x00001638, 0x00000000 },
	{ 0x00001678, 0x00000000 },
	{ 0x000016b8, 0x00000000 },
	{ 0x000016f8, 0x00000000 },
	{ 0x00001738, 0x00000000 },
	{ 0x00001778, 0x00000000 },
	{ 0x000017b8, 0x00000000 },
	{ 0x000017f8, 0x00000000 },
	{ 0x0000103c, 0x00000000 },
	{ 0x0000107c, 0x00000000 },
	{ 0x000010bc, 0x00000000 },
	{ 0x000010fc, 0x00000000 },
	{ 0x0000113c, 0x00000000 },
	{ 0x0000117c, 0x00000000 },
	{ 0x000011bc, 0x00000000 },
	{ 0x000011fc, 0x00000000 },
	{ 0x0000123c, 0x00000000 },
	{ 0x0000127c, 0x00000000 },
	{ 0x000012bc, 0x00000000 },
	{ 0x000012fc, 0x00000000 },
	{ 0x0000133c, 0x00000000 },
	{ 0x0000137c, 0x00000000 },
	{ 0x000013bc, 0x00000000 },
	{ 0x000013fc, 0x00000000 },
	{ 0x0000143c, 0x00000000 },
	{ 0x0000147c, 0x00000000 },
	{ 0x00020010, 0x00000003 },
	{ 0x00020038, 0x000004c2 },
	{ 0x00008004, 0x00000000 },
	{ 0x00008008, 0x00000000 },
	{ 0x0000800c, 0x00000000 },
	{ 0x00008018, 0x00000700 },
	{ 0x00008020, 0x00000000 },
	{ 0x00008038, 0x00000000 },
	{ 0x0000803c, 0x00000000 },
	{ 0x00008048, 0x40000000 },
	{ 0x00008054, 0x00004000 },
	{ 0x00008058, 0x00000000 },
	{ 0x0000805c, 0x000fc78f },
	{ 0x00008060, 0x0000000f },
	{ 0x00008064, 0x00000000 },
	{ 0x000080c0, 0x2a82301a },
	{ 0x000080c4, 0x05dc01e0 },
	{ 0x000080c8, 0x1f402710 },
	{ 0x000080cc, 0x01f40000 },
	{ 0x000080d0, 0x00001e00 },
	{ 0x000080d4, 0x00000000 },
	{ 0x000080d8, 0x00400000 },
	{ 0x000080e0, 0xffffffff },
	{ 0x000080e4, 0x0000ffff },
	{ 0x000080e8, 0x003f3f3f },
	{ 0x000080ec, 0x00000000 },
	{ 0x000080f0, 0x00000000 },
	{ 0x000080f4, 0x00000000 },
	{ 0x000080f8, 0x00000000 },
	{ 0x000080fc, 0x00020000 },
	{ 0x00008100, 0x00020000 },
	{ 0x00008104, 0x00000001 },
	{ 0x00008108, 0x00000052 },
	{ 0x0000810c, 0x00000000 },
	{ 0x00008110, 0x00000168 },
	{ 0x00008118, 0x000100aa },
	{ 0x0000811c, 0x00003210 },
	{ 0x00008120, 0x08f04800 },
	{ 0x00008124, 0x00000000 },
	{ 0x00008128, 0x00000000 },
	{ 0x0000812c, 0x00000000 },
	{ 0x00008130, 0x00000000 },
	{ 0x00008134, 0x00000000 },
	{ 0x00008138, 0x00000000 },
	{ 0x0000813c, 0x00000000 },
	{ 0x00008144, 0x00000000 },
	{ 0x00008168, 0x00000000 },
	{ 0x0000816c, 0x00000000 },
	{ 0x00008170, 0x32143320 },
	{ 0x00008174, 0xfaa4fa50 },
	{ 0x00008178, 0x00000100 },
	{ 0x0000817c, 0x00000000 },
	{ 0x000081c4, 0x00000000 },
	{ 0x000081d0, 0x00003210 },
	{ 0x000081ec, 0x00000000 },
	{ 0x000081f0, 0x00000000 },
	{ 0x000081f4, 0x00000000 },
	{ 0x000081f8, 0x00000000 },
	{ 0x000081fc, 0x00000000 },
	{ 0x00008200, 0x00000000 },
	{ 0x00008204, 0x00000000 },
	{ 0x00008208, 0x00000000 },
	{ 0x0000820c, 0x00000000 },
	{ 0x00008210, 0x00000000 },
	{ 0x00008214, 0x00000000 },
	{ 0x00008218, 0x00000000 },
	{ 0x0000821c, 0x00000000 },
	{ 0x00008220, 0x00000000 },
	{ 0x00008224, 0x00000000 },
	{ 0x00008228, 0x00000000 },
	{ 0x0000822c, 0x00000000 },
	{ 0x00008230, 0x00000000 },
	{ 0x00008234, 0x00000000 },
	{ 0x00008238, 0x00000000 },
	{ 0x0000823c, 0x00000000 },
	{ 0x00008240, 0x00100000 },
	{ 0x00008244, 0x0010f400 },
	{ 0x00008248, 0x00000100 },
	{ 0x0000824c, 0x0001e800 },
	{ 0x00008250, 0x00000000 },
	{ 0x00008254, 0x00000000 },
	{ 0x00008258, 0x00000000 },
	{ 0x0000825c, 0x400000ff },
	{ 0x00008260, 0x00080922 },
	{ 0x00008270, 0x00000000 },
	{ 0x00008274, 0x40000000 },
	{ 0x00008278, 0x003e4180 },
	{ 0x0000827c, 0x00000000 },
	{ 0x00008284, 0x0000002c },
	{ 0x00008288, 0x0000002c },
	{ 0x0000828c, 0x00000000 },
	{ 0x00008294, 0x00000000 },
	{ 0x00008298, 0x00000000 },
	{ 0x00008300, 0x00000000 },
	{ 0x00008304, 0x00000000 },
	{ 0x00008308, 0x00000000 },
	{ 0x0000830c, 0x00000000 },
	{ 0x00008310, 0x00000000 },
	{ 0x00008314, 0x00000000 },
	{ 0x00008318, 0x00000000 },
	{ 0x00008328, 0x00000000 },
	{ 0x0000832c, 0x00000007 },
	{ 0x00008330, 0x00000302 },
	{ 0x00008334, 0x00000e00 },
	{ 0x00008338, 0x00000000 },
	{ 0x0000833c, 0x00000000 },
	{ 0x00008340, 0x000107ff },
	{ 0x00009808, 0x00000000 },
	{ 0x0000980c, 0xad848e19 },
	{ 0x00009810, 0x7d14e000 },
	{ 0x00009814, 0x9c0a9f6b },
	{ 0x0000981c, 0x00000000 },
	{ 0x0000982c, 0x0000a000 },
	{ 0x00009830, 0x00000000 },
	{ 0x0000983c, 0x00200400 },
	{ 0x00009840, 0x206a01ae },
	{ 0x0000984c, 0x1284233c },
	{ 0x00009854, 0x00000859 },
	{ 0x00009900, 0x00000000 },
	{ 0x00009904, 0x00000000 },
	{ 0x00009908, 0x00000000 },
	{ 0x0000990c, 0x00000000 },
	{ 0x0000991c, 0x10000fff },
	{ 0x00009920, 0x05100000 },
	{ 0x0000a920, 0x05100000 },
	{ 0x0000b920, 0x05100000 },
	{ 0x00009928, 0x00000001 },
	{ 0x0000992c, 0x00000004 },
	{ 0x00009934, 0x1e1f2022 },
	{ 0x00009938, 0x0a0b0c0d },
	{ 0x0000993c, 0x00000000 },
	{ 0x00009948, 0x9280b212 },
	{ 0x0000994c, 0x00020028 },
	{ 0x0000c95c, 0x004b6a8e },
	{ 0x0000c968, 0x000003ce },
	{ 0x00009970, 0x190fb515 },
	{ 0x00009974, 0x00000000 },
	{ 0x00009978, 0x00000001 },
	{ 0x0000997c, 0x00000000 },
	{ 0x00009980, 0x00000000 },
	{ 0x00009984, 0x00000000 },
	{ 0x00009988, 0x00000000 },
	{ 0x0000998c, 0x00000000 },
	{ 0x00009990, 0x00000000 },
	{ 0x00009994, 0x00000000 },
	{ 0x00009998, 0x00000000 },
	{ 0x0000999c, 0x00000000 },
	{ 0x000099a0, 0x00000000 },
	{ 0x000099a4, 0x00000001 },
	{ 0x000099a8, 0x201fff00 },
	{ 0x000099ac, 0x006f0000 },
	{ 0x000099b0, 0x03051000 },
	{ 0x000099dc, 0x00000000 },
	{ 0x000099e0, 0x00000200 },
	{ 0x000099e4, 0xaaaaaaaa },
	{ 0x000099e8, 0x3c466478 },
	{ 0x000099ec, 0x0cc80caa },
	{ 0x000099fc, 0x00001042 },
	{ 0x00009b00, 0x00000000 },
	{ 0x00009b04, 0x00000001 },
	{ 0x00009b08, 0x00000002 },
	{ 0x00009b0c, 0x00000003 },
	{ 0x00009b10, 0x00000004 },
	{ 0x00009b14, 0x00000005 },
	{ 0x00009b18, 0x00000008 },
	{ 0x00009b1c, 0x00000009 },
	{ 0x00009b20, 0x0000000a },
	{ 0x00009b24, 0x0000000b },
	{ 0x00009b28, 0x0000000c },
	{ 0x00009b2c, 0x0000000d },
	{ 0x00009b30, 0x00000010 },
	{ 0x00009b34, 0x00000011 },
	{ 0x00009b38, 0x00000012 },
	{ 0x00009b3c, 0x00000013 },
	{ 0x00009b40, 0x00000014 },
	{ 0x00009b44, 0x00000015 },
	{ 0x00009b48, 0x00000018 },
	{ 0x00009b4c, 0x00000019 },
	{ 0x00009b50, 0x0000001a },
	{ 0x00009b54, 0x0000001b },
	{ 0x00009b58, 0x0000001c },
	{ 0x00009b5c, 0x0000001d },
	{ 0x00009b60, 0x00000020 },
	{ 0x00009b64, 0x00000021 },
	{ 0x00009b68, 0x00000022 },
	{ 0x00009b6c, 0x00000023 },
	{ 0x00009b70, 0x00000024 },
	{ 0x00009b74, 0x00000025 },
	{ 0x00009b78, 0x00000028 },
	{ 0x00009b7c, 0x00000029 },
	{ 0x00009b80, 0x0000002a },
	{ 0x00009b84, 0x0000002b },
	{ 0x00009b88, 0x0000002c },
	{ 0x00009b8c, 0x0000002d },
	{ 0x00009b90, 0x00000030 },
	{ 0x00009b94, 0x00000031 },
	{ 0x00009b98, 0x00000032 },
	{ 0x00009b9c, 0x00000033 },
	{ 0x00009ba0, 0x00000034 },
	{ 0x00009ba4, 0x00000035 },
	{ 0x00009ba8, 0x00000035 },
	{ 0x00009bac, 0x00000035 },
	{ 0x00009bb0, 0x00000035 },
	{ 0x00009bb4, 0x00000035 },
	{ 0x00009bb8, 0x00000035 },
	{ 0x00009bbc, 0x00000035 },
	{ 0x00009bc0, 0x00000035 },
	{ 0x00009bc4, 0x00000035 },
	{ 0x00009bc8, 0x00000035 },
	{ 0x00009bcc, 0x00000035 },
	{ 0x00009bd0, 0x00000035 },
	{ 0x00009bd4, 0x00000035 },
	{ 0x00009bd8, 0x00000035 },
	{ 0x00009bdc, 0x00000035 },
	{ 0x00009be0, 0x00000035 },
	{ 0x00009be4, 0x00000035 },
	{ 0x00009be8, 0x00000035 },
	{ 0x00009bec, 0x00000035 },
	{ 0x00009bf0, 0x00000035 },
	{ 0x00009bf4, 0x00000035 },
	{ 0x00009bf8, 0x00000010 },
	{ 0x00009bfc, 0x0000001a },
	{ 0x0000a210, 0x40806333 },
	{ 0x0000a214, 0x00106c10 },
	{ 0x0000a218, 0x009c4060 },
	{ 0x0000a220, 0x018830c6 },
	{ 0x0000a224, 0x00000400 },
	{ 0x0000a228, 0x001a0bb5 },
	{ 0x0000a22c, 0x00000000 },
	{ 0x0000a234, 0x20202020 },
	{ 0x0000a238, 0x20202020 },
	{ 0x0000a23c, 0x13c889ae },
	{ 0x0000a240, 0x38490a20 },
	{ 0x0000a244, 0x00007bb6 },
	{ 0x0000a248, 0x0fff3ffc },
	{ 0x0000a24c, 0x00000001 },
	{ 0x0000a250, 0x0000a000 },
	{ 0x0000a254, 0x00000000 },
	{ 0x0000a258, 0x0cc75380 },
	{ 0x0000a25c, 0x0f0f0f01 },
	{ 0x0000a260, 0xdfa91f01 },
	{ 0x0000a268, 0x00000001 },
	{ 0x0000a26c, 0x0ebae9c6 },
	{ 0x0000b26c, 0x0ebae9c6 },
	{ 0x0000c26c, 0x0ebae9c6 },
	{ 0x0000d270, 0x00820820 },
	{ 0x0000a278, 0x1ce739ce },
	{ 0x0000a27c, 0x050701ce },
	{ 0x0000a338, 0x00000000 },
	{ 0x0000a33c, 0x00000000 },
	{ 0x0000a340, 0x00000000 },
	{ 0x0000a344, 0x00000000 },
	{ 0x0000a348, 0x3fffffff },
	{ 0x0000a34c, 0x3fffffff },
	{ 0x0000a350, 0x3fffffff },
	{ 0x0000a354, 0x0003ffff },
	{ 0x0000a358, 0x79a8aa33 },
	{ 0x0000d35c, 0x07ffffef },
	{ 0x0000d360, 0x0fffffe7 },
	{ 0x0000d364, 0x17ffffe5 },
	{ 0x0000d368, 0x1fffffe4 },
	{ 0x0000d36c, 0x37ffffe3 },
	{ 0x0000d370, 0x3fffffe3 },
	{ 0x0000d374, 0x57ffffe3 },
	{ 0x0000d378, 0x5fffffe2 },
	{ 0x0000d37c, 0x7fffffe2 },
	{ 0x0000d380, 0x7f3c7bba },
	{ 0x0000d384, 0xf3307ff0 },
	{ 0x0000a388, 0x0c000000 },
	{ 0x0000a38c, 0x20202020 },
	{ 0x0000a390, 0x20202020 },
	{ 0x0000a394, 0x1ce739ce },
	{ 0x0000a398, 0x000001ce },
	{ 0x0000a39c, 0x00000001 },
	{ 0x0000a3a0, 0x00000000 },
	{ 0x0000a3a4, 0x00000000 },
	{ 0x0000a3a8, 0x00000000 },
	{ 0x0000a3ac, 0x00000000 },
	{ 0x0000a3b0, 0x00000000 },
	{ 0x0000a3b4, 0x00000000 },
	{ 0x0000a3b8, 0x00000000 },
	{ 0x0000a3bc, 0x00000000 },
	{ 0x0000a3c0, 0x00000000 },
	{ 0x0000a3c4, 0x00000000 },
	{ 0x0000a3c8, 0x00000246 },
	{ 0x0000a3cc, 0x20202020 },
	{ 0x0000a3d0, 0x20202020 },
	{ 0x0000a3d4, 0x20202020 },
	{ 0x0000a3dc, 0x1ce739ce },
	{ 0x0000a3e0, 0x000001ce },
};
1112
/*
 * RF synthesizer bank 0 initial values for AR9100.
 * Each row: { register offset, value }.
 */
static const u32 ar5416Bank0_9100[][2] = {
	{ 0x000098b0, 0x1e5795e5 },
	{ 0x000098e0, 0x02008020 },
};
1117
/*
 * Baseband RF gain table for AR9100, one row per gain-table register
 * (0x9a00..0x9afc in steps of 4).
 * Columns: { register offset, value A, value B } — the two value
 * columns presumably correspond to the two bands/modes selected by the
 * consuming code; confirm there before editing.
 */
static const u32 ar5416BB_RfGain_9100[][3] = {
	{ 0x00009a00, 0x00000000, 0x00000000 },
	{ 0x00009a04, 0x00000040, 0x00000040 },
	{ 0x00009a08, 0x00000080, 0x00000080 },
	{ 0x00009a0c, 0x000001a1, 0x00000141 },
	{ 0x00009a10, 0x000001e1, 0x00000181 },
	{ 0x00009a14, 0x00000021, 0x000001c1 },
	{ 0x00009a18, 0x00000061, 0x00000001 },
	{ 0x00009a1c, 0x00000168, 0x00000041 },
	{ 0x00009a20, 0x000001a8, 0x000001a8 },
	{ 0x00009a24, 0x000001e8, 0x000001e8 },
	{ 0x00009a28, 0x00000028, 0x00000028 },
	{ 0x00009a2c, 0x00000068, 0x00000068 },
	{ 0x00009a30, 0x00000189, 0x000000a8 },
	{ 0x00009a34, 0x000001c9, 0x00000169 },
	{ 0x00009a38, 0x00000009, 0x000001a9 },
	{ 0x00009a3c, 0x00000049, 0x000001e9 },
	{ 0x00009a40, 0x00000089, 0x00000029 },
	{ 0x00009a44, 0x00000170, 0x00000069 },
	{ 0x00009a48, 0x000001b0, 0x00000190 },
	{ 0x00009a4c, 0x000001f0, 0x000001d0 },
	{ 0x00009a50, 0x00000030, 0x00000010 },
	{ 0x00009a54, 0x00000070, 0x00000050 },
	{ 0x00009a58, 0x00000191, 0x00000090 },
	{ 0x00009a5c, 0x000001d1, 0x00000151 },
	{ 0x00009a60, 0x00000011, 0x00000191 },
	{ 0x00009a64, 0x00000051, 0x000001d1 },
	{ 0x00009a68, 0x00000091, 0x00000011 },
	{ 0x00009a6c, 0x000001b8, 0x00000051 },
	{ 0x00009a70, 0x000001f8, 0x00000198 },
	{ 0x00009a74, 0x00000038, 0x000001d8 },
	{ 0x00009a78, 0x00000078, 0x00000018 },
	{ 0x00009a7c, 0x00000199, 0x00000058 },
	{ 0x00009a80, 0x000001d9, 0x00000098 },
	{ 0x00009a84, 0x00000019, 0x00000159 },
	{ 0x00009a88, 0x00000059, 0x00000199 },
	{ 0x00009a8c, 0x00000099, 0x000001d9 },
	{ 0x00009a90, 0x000000d9, 0x00000019 },
	{ 0x00009a94, 0x000000f9, 0x00000059 },
	{ 0x00009a98, 0x000000f9, 0x00000099 },
	{ 0x00009a9c, 0x000000f9, 0x000000d9 },
	{ 0x00009aa0, 0x000000f9, 0x000000f9 },
	{ 0x00009aa4, 0x000000f9, 0x000000f9 },
	{ 0x00009aa8, 0x000000f9, 0x000000f9 },
	{ 0x00009aac, 0x000000f9, 0x000000f9 },
	{ 0x00009ab0, 0x000000f9, 0x000000f9 },
	{ 0x00009ab4, 0x000000f9, 0x000000f9 },
	{ 0x00009ab8, 0x000000f9, 0x000000f9 },
	{ 0x00009abc, 0x000000f9, 0x000000f9 },
	{ 0x00009ac0, 0x000000f9, 0x000000f9 },
	{ 0x00009ac4, 0x000000f9, 0x000000f9 },
	{ 0x00009ac8, 0x000000f9, 0x000000f9 },
	{ 0x00009acc, 0x000000f9, 0x000000f9 },
	{ 0x00009ad0, 0x000000f9, 0x000000f9 },
	{ 0x00009ad4, 0x000000f9, 0x000000f9 },
	{ 0x00009ad8, 0x000000f9, 0x000000f9 },
	{ 0x00009adc, 0x000000f9, 0x000000f9 },
	{ 0x00009ae0, 0x000000f9, 0x000000f9 },
	{ 0x00009ae4, 0x000000f9, 0x000000f9 },
	{ 0x00009ae8, 0x000000f9, 0x000000f9 },
	{ 0x00009aec, 0x000000f9, 0x000000f9 },
	{ 0x00009af0, 0x000000f9, 0x000000f9 },
	{ 0x00009af4, 0x000000f9, 0x000000f9 },
	{ 0x00009af8, 0x000000f9, 0x000000f9 },
	{ 0x00009afc, 0x000000f9, 0x000000f9 },
};
1184
/*
 * RF synthesizer bank 1 initial values for AR9100.
 * Identical values to ar5416Bank1.
 */
static const u32 ar5416Bank1_9100[][2] = {
	{ 0x000098b0, 0x02108421},
	{ 0x000098ec, 0x00000008},
};
1189
/*
 * RF synthesizer bank 2 initial values for AR9100.
 * Identical values to ar5416Bank2.
 */
static const u32 ar5416Bank2_9100[][2] = {
	{ 0x000098b0, 0x0e73ff17},
	{ 0x000098e0, 0x00000420},
};
1194
/*
 * RF synthesizer bank 3 initial values for AR9100.
 * Columns: { register offset, value A, value B } (two mode variants).
 */
static const u32 ar5416Bank3_9100[][3] = {
	{ 0x000098f0, 0x01400018, 0x01c00018 },
};
1198
/*
 * RF bank 6 initial values for AR9100: 0x989c shift-register writes
 * terminated by a latching 0x98d0 write (presumed — confirm in the
 * bank-write helper). Differs from ar5416Bank6 in several tail rows
 * (e.g. 0x0014000f, 0x00c40002, 0x000180d6, 0x0000c0aa, 0xb1, 0x2000).
 */
static const u32 ar5416Bank6_9100[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x004210a2, 0x004210a2 },
	{ 0x0000989c, 0x0014000f, 0x0014000f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x000180d6, 0x000180d6 },
	{ 0x0000989c, 0x0000c0aa, 0x0000c0aa },
	{ 0x0000989c, 0x000000b1, 0x000000b1 },
	{ 0x0000989c, 0x00002000, 0x00002000 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
1235
1236
/*
 * RF bank 6 initial values for AR9100, TPC (transmit power control)
 * variant. Same write sequence shape as ar5416Bank6_9100; differs from
 * ar5416Bank6TPC in a few rows (0x2014008f vs 0x201400df, 0x00007080
 * vs 0x00007081).
 */
static const u32 ar5416Bank6TPC_9100[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x00423022, 0x00423022 },
	{ 0x0000989c, 0x2014008f, 0x2014008f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000e1, 0x000000e1 },
	{ 0x0000989c, 0x00007080, 0x00007080 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
1273
/*
 * RF bank 7 initial values for AR9100.
 * Identical values to ar5416Bank7.
 */
static const u32 ar5416Bank7_9100[][2] = {
	{ 0x0000989c, 0x00000500 },
	{ 0x0000989c, 0x00000800 },
	{ 0x000098cc, 0x0000000e },
};
1279
/*
 * ADDAC (analog/digital converter block) initial values for AR9100.
 * Shorter stream than the AR5416 table, with different sparse non-zero
 * words (0x10, 0xc0, 0x15); terminated by the 0x98cc write.
 */
static const u32 ar5416Addac_9100[][2] = {
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000010 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x000000c0 },
	{0x0000989c, 0x00000015 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x000098cc, 0x00000000 },
};
1314
1315static const u32 ar5416Modes_9160[][6] = {
1316 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1317 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
1318 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
1319 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
1320 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
1321 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
1322 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
1323 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
1324 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1325 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
1326 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1327 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
1328 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
1329 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1330 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1331 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1332 { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
1333 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
1334 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
1335 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
1336 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
1337 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
1338 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
1339 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
1340 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
1341 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
1342 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
1343 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1344 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1345 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1346 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
1347 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
1348 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
1349 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
1350 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
1351 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
1352 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
1353 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
1354 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1355 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1356 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
1357 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
1358 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1359 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1360 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1361 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
1362 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
1363 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
1364 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
1365 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
1366 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
1367 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
1368 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
1369 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
1370 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
1371 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
1372 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
1373 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
1374 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
1375 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1376 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1377 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1378};
1379 19
1380static const u32 ar5416Common_9160[][2] = {
1381 { 0x0000000c, 0x00000000 },
1382 { 0x00000030, 0x00020015 },
1383 { 0x00000034, 0x00000005 },
1384 { 0x00000040, 0x00000000 },
1385 { 0x00000044, 0x00000008 },
1386 { 0x00000048, 0x00000008 },
1387 { 0x0000004c, 0x00000010 },
1388 { 0x00000050, 0x00000000 },
1389 { 0x00000054, 0x0000001f },
1390 { 0x00000800, 0x00000000 },
1391 { 0x00000804, 0x00000000 },
1392 { 0x00000808, 0x00000000 },
1393 { 0x0000080c, 0x00000000 },
1394 { 0x00000810, 0x00000000 },
1395 { 0x00000814, 0x00000000 },
1396 { 0x00000818, 0x00000000 },
1397 { 0x0000081c, 0x00000000 },
1398 { 0x00000820, 0x00000000 },
1399 { 0x00000824, 0x00000000 },
1400 { 0x00001040, 0x002ffc0f },
1401 { 0x00001044, 0x002ffc0f },
1402 { 0x00001048, 0x002ffc0f },
1403 { 0x0000104c, 0x002ffc0f },
1404 { 0x00001050, 0x002ffc0f },
1405 { 0x00001054, 0x002ffc0f },
1406 { 0x00001058, 0x002ffc0f },
1407 { 0x0000105c, 0x002ffc0f },
1408 { 0x00001060, 0x002ffc0f },
1409 { 0x00001064, 0x002ffc0f },
1410 { 0x00001230, 0x00000000 },
1411 { 0x00001270, 0x00000000 },
1412 { 0x00001038, 0x00000000 },
1413 { 0x00001078, 0x00000000 },
1414 { 0x000010b8, 0x00000000 },
1415 { 0x000010f8, 0x00000000 },
1416 { 0x00001138, 0x00000000 },
1417 { 0x00001178, 0x00000000 },
1418 { 0x000011b8, 0x00000000 },
1419 { 0x000011f8, 0x00000000 },
1420 { 0x00001238, 0x00000000 },
1421 { 0x00001278, 0x00000000 },
1422 { 0x000012b8, 0x00000000 },
1423 { 0x000012f8, 0x00000000 },
1424 { 0x00001338, 0x00000000 },
1425 { 0x00001378, 0x00000000 },
1426 { 0x000013b8, 0x00000000 },
1427 { 0x000013f8, 0x00000000 },
1428 { 0x00001438, 0x00000000 },
1429 { 0x00001478, 0x00000000 },
1430 { 0x000014b8, 0x00000000 },
1431 { 0x000014f8, 0x00000000 },
1432 { 0x00001538, 0x00000000 },
1433 { 0x00001578, 0x00000000 },
1434 { 0x000015b8, 0x00000000 },
1435 { 0x000015f8, 0x00000000 },
1436 { 0x00001638, 0x00000000 },
1437 { 0x00001678, 0x00000000 },
1438 { 0x000016b8, 0x00000000 },
1439 { 0x000016f8, 0x00000000 },
1440 { 0x00001738, 0x00000000 },
1441 { 0x00001778, 0x00000000 },
1442 { 0x000017b8, 0x00000000 },
1443 { 0x000017f8, 0x00000000 },
1444 { 0x0000103c, 0x00000000 },
1445 { 0x0000107c, 0x00000000 },
1446 { 0x000010bc, 0x00000000 },
1447 { 0x000010fc, 0x00000000 },
1448 { 0x0000113c, 0x00000000 },
1449 { 0x0000117c, 0x00000000 },
1450 { 0x000011bc, 0x00000000 },
1451 { 0x000011fc, 0x00000000 },
1452 { 0x0000123c, 0x00000000 },
1453 { 0x0000127c, 0x00000000 },
1454 { 0x000012bc, 0x00000000 },
1455 { 0x000012fc, 0x00000000 },
1456 { 0x0000133c, 0x00000000 },
1457 { 0x0000137c, 0x00000000 },
1458 { 0x000013bc, 0x00000000 },
1459 { 0x000013fc, 0x00000000 },
1460 { 0x0000143c, 0x00000000 },
1461 { 0x0000147c, 0x00000000 },
1462 { 0x00004030, 0x00000002 },
1463 { 0x0000403c, 0x00000002 },
1464 { 0x00007010, 0x00000020 },
1465 { 0x00007038, 0x000004c2 },
1466 { 0x00008004, 0x00000000 },
1467 { 0x00008008, 0x00000000 },
1468 { 0x0000800c, 0x00000000 },
1469 { 0x00008018, 0x00000700 },
1470 { 0x00008020, 0x00000000 },
1471 { 0x00008038, 0x00000000 },
1472 { 0x0000803c, 0x00000000 },
1473 { 0x00008048, 0x40000000 },
1474 { 0x00008054, 0x00000000 },
1475 { 0x00008058, 0x00000000 },
1476 { 0x0000805c, 0x000fc78f },
1477 { 0x00008060, 0x0000000f },
1478 { 0x00008064, 0x00000000 },
1479 { 0x000080c0, 0x2a82301a },
1480 { 0x000080c4, 0x05dc01e0 },
1481 { 0x000080c8, 0x1f402710 },
1482 { 0x000080cc, 0x01f40000 },
1483 { 0x000080d0, 0x00001e00 },
1484 { 0x000080d4, 0x00000000 },
1485 { 0x000080d8, 0x00400000 },
1486 { 0x000080e0, 0xffffffff },
1487 { 0x000080e4, 0x0000ffff },
1488 { 0x000080e8, 0x003f3f3f },
1489 { 0x000080ec, 0x00000000 },
1490 { 0x000080f0, 0x00000000 },
1491 { 0x000080f4, 0x00000000 },
1492 { 0x000080f8, 0x00000000 },
1493 { 0x000080fc, 0x00020000 },
1494 { 0x00008100, 0x00020000 },
1495 { 0x00008104, 0x00000001 },
1496 { 0x00008108, 0x00000052 },
1497 { 0x0000810c, 0x00000000 },
1498 { 0x00008110, 0x00000168 },
1499 { 0x00008118, 0x000100aa },
1500 { 0x0000811c, 0x00003210 },
1501 { 0x00008120, 0x08f04800 },
1502 { 0x00008124, 0x00000000 },
1503 { 0x00008128, 0x00000000 },
1504 { 0x0000812c, 0x00000000 },
1505 { 0x00008130, 0x00000000 },
1506 { 0x00008134, 0x00000000 },
1507 { 0x00008138, 0x00000000 },
1508 { 0x0000813c, 0x00000000 },
1509 { 0x00008144, 0xffffffff },
1510 { 0x00008168, 0x00000000 },
1511 { 0x0000816c, 0x00000000 },
1512 { 0x00008170, 0x32143320 },
1513 { 0x00008174, 0xfaa4fa50 },
1514 { 0x00008178, 0x00000100 },
1515 { 0x0000817c, 0x00000000 },
1516 { 0x000081c4, 0x00000000 },
1517 { 0x000081d0, 0x00003210 },
1518 { 0x000081ec, 0x00000000 },
1519 { 0x000081f0, 0x00000000 },
1520 { 0x000081f4, 0x00000000 },
1521 { 0x000081f8, 0x00000000 },
1522 { 0x000081fc, 0x00000000 },
1523 { 0x00008200, 0x00000000 },
1524 { 0x00008204, 0x00000000 },
1525 { 0x00008208, 0x00000000 },
1526 { 0x0000820c, 0x00000000 },
1527 { 0x00008210, 0x00000000 },
1528 { 0x00008214, 0x00000000 },
1529 { 0x00008218, 0x00000000 },
1530 { 0x0000821c, 0x00000000 },
1531 { 0x00008220, 0x00000000 },
1532 { 0x00008224, 0x00000000 },
1533 { 0x00008228, 0x00000000 },
1534 { 0x0000822c, 0x00000000 },
1535 { 0x00008230, 0x00000000 },
1536 { 0x00008234, 0x00000000 },
1537 { 0x00008238, 0x00000000 },
1538 { 0x0000823c, 0x00000000 },
1539 { 0x00008240, 0x00100000 },
1540 { 0x00008244, 0x0010f400 },
1541 { 0x00008248, 0x00000100 },
1542 { 0x0000824c, 0x0001e800 },
1543 { 0x00008250, 0x00000000 },
1544 { 0x00008254, 0x00000000 },
1545 { 0x00008258, 0x00000000 },
1546 { 0x0000825c, 0x400000ff },
1547 { 0x00008260, 0x00080922 },
1548 { 0x00008270, 0x00000000 },
1549 { 0x00008274, 0x40000000 },
1550 { 0x00008278, 0x003e4180 },
1551 { 0x0000827c, 0x00000000 },
1552 { 0x00008284, 0x0000002c },
1553 { 0x00008288, 0x0000002c },
1554 { 0x0000828c, 0x00000000 },
1555 { 0x00008294, 0x00000000 },
1556 { 0x00008298, 0x00000000 },
1557 { 0x00008300, 0x00000000 },
1558 { 0x00008304, 0x00000000 },
1559 { 0x00008308, 0x00000000 },
1560 { 0x0000830c, 0x00000000 },
1561 { 0x00008310, 0x00000000 },
1562 { 0x00008314, 0x00000000 },
1563 { 0x00008318, 0x00000000 },
1564 { 0x00008328, 0x00000000 },
1565 { 0x0000832c, 0x00000007 },
1566 { 0x00008330, 0x00000302 },
1567 { 0x00008334, 0x00000e00 },
1568 { 0x00008338, 0x00ff0000 },
1569 { 0x0000833c, 0x00000000 },
1570 { 0x00008340, 0x000107ff },
1571 { 0x00009808, 0x00000000 },
1572 { 0x0000980c, 0xad848e19 },
1573 { 0x00009810, 0x7d14e000 },
1574 { 0x00009814, 0x9c0a9f6b },
1575 { 0x0000981c, 0x00000000 },
1576 { 0x0000982c, 0x0000a000 },
1577 { 0x00009830, 0x00000000 },
1578 { 0x0000983c, 0x00200400 },
1579 { 0x00009840, 0x206a01ae },
1580 { 0x0000984c, 0x1284233c },
1581 { 0x00009854, 0x00000859 },
1582 { 0x00009900, 0x00000000 },
1583 { 0x00009904, 0x00000000 },
1584 { 0x00009908, 0x00000000 },
1585 { 0x0000990c, 0x00000000 },
1586 { 0x0000991c, 0x10000fff },
1587 { 0x00009920, 0x05100000 },
1588 { 0x0000a920, 0x05100000 },
1589 { 0x0000b920, 0x05100000 },
1590 { 0x00009928, 0x00000001 },
1591 { 0x0000992c, 0x00000004 },
1592 { 0x00009934, 0x1e1f2022 },
1593 { 0x00009938, 0x0a0b0c0d },
1594 { 0x0000993c, 0x00000000 },
1595 { 0x00009948, 0x9280b212 },
1596 { 0x0000994c, 0x00020028 },
1597 { 0x00009954, 0x5f3ca3de },
1598 { 0x00009958, 0x2108ecff },
1599 { 0x00009940, 0x00750604 },
1600 { 0x0000c95c, 0x004b6a8e },
1601 { 0x00009970, 0x190fb515 },
1602 { 0x00009974, 0x00000000 },
1603 { 0x00009978, 0x00000001 },
1604 { 0x0000997c, 0x00000000 },
1605 { 0x00009980, 0x00000000 },
1606 { 0x00009984, 0x00000000 },
1607 { 0x00009988, 0x00000000 },
1608 { 0x0000998c, 0x00000000 },
1609 { 0x00009990, 0x00000000 },
1610 { 0x00009994, 0x00000000 },
1611 { 0x00009998, 0x00000000 },
1612 { 0x0000999c, 0x00000000 },
1613 { 0x000099a0, 0x00000000 },
1614 { 0x000099a4, 0x00000001 },
1615 { 0x000099a8, 0x201fff00 },
1616 { 0x000099ac, 0x006f0000 },
1617 { 0x000099b0, 0x03051000 },
1618 { 0x000099dc, 0x00000000 },
1619 { 0x000099e0, 0x00000200 },
1620 { 0x000099e4, 0xaaaaaaaa },
1621 { 0x000099e8, 0x3c466478 },
1622 { 0x000099ec, 0x0cc80caa },
1623 { 0x000099fc, 0x00001042 },
1624 { 0x00009b00, 0x00000000 },
1625 { 0x00009b04, 0x00000001 },
1626 { 0x00009b08, 0x00000002 },
1627 { 0x00009b0c, 0x00000003 },
1628 { 0x00009b10, 0x00000004 },
1629 { 0x00009b14, 0x00000005 },
1630 { 0x00009b18, 0x00000008 },
1631 { 0x00009b1c, 0x00000009 },
1632 { 0x00009b20, 0x0000000a },
1633 { 0x00009b24, 0x0000000b },
1634 { 0x00009b28, 0x0000000c },
1635 { 0x00009b2c, 0x0000000d },
1636 { 0x00009b30, 0x00000010 },
1637 { 0x00009b34, 0x00000011 },
1638 { 0x00009b38, 0x00000012 },
1639 { 0x00009b3c, 0x00000013 },
1640 { 0x00009b40, 0x00000014 },
1641 { 0x00009b44, 0x00000015 },
1642 { 0x00009b48, 0x00000018 },
1643 { 0x00009b4c, 0x00000019 },
1644 { 0x00009b50, 0x0000001a },
1645 { 0x00009b54, 0x0000001b },
1646 { 0x00009b58, 0x0000001c },
1647 { 0x00009b5c, 0x0000001d },
1648 { 0x00009b60, 0x00000020 },
1649 { 0x00009b64, 0x00000021 },
1650 { 0x00009b68, 0x00000022 },
1651 { 0x00009b6c, 0x00000023 },
1652 { 0x00009b70, 0x00000024 },
1653 { 0x00009b74, 0x00000025 },
1654 { 0x00009b78, 0x00000028 },
1655 { 0x00009b7c, 0x00000029 },
1656 { 0x00009b80, 0x0000002a },
1657 { 0x00009b84, 0x0000002b },
1658 { 0x00009b88, 0x0000002c },
1659 { 0x00009b8c, 0x0000002d },
1660 { 0x00009b90, 0x00000030 },
1661 { 0x00009b94, 0x00000031 },
1662 { 0x00009b98, 0x00000032 },
1663 { 0x00009b9c, 0x00000033 },
1664 { 0x00009ba0, 0x00000034 },
1665 { 0x00009ba4, 0x00000035 },
1666 { 0x00009ba8, 0x00000035 },
1667 { 0x00009bac, 0x00000035 },
1668 { 0x00009bb0, 0x00000035 },
1669 { 0x00009bb4, 0x00000035 },
1670 { 0x00009bb8, 0x00000035 },
1671 { 0x00009bbc, 0x00000035 },
1672 { 0x00009bc0, 0x00000035 },
1673 { 0x00009bc4, 0x00000035 },
1674 { 0x00009bc8, 0x00000035 },
1675 { 0x00009bcc, 0x00000035 },
1676 { 0x00009bd0, 0x00000035 },
1677 { 0x00009bd4, 0x00000035 },
1678 { 0x00009bd8, 0x00000035 },
1679 { 0x00009bdc, 0x00000035 },
1680 { 0x00009be0, 0x00000035 },
1681 { 0x00009be4, 0x00000035 },
1682 { 0x00009be8, 0x00000035 },
1683 { 0x00009bec, 0x00000035 },
1684 { 0x00009bf0, 0x00000035 },
1685 { 0x00009bf4, 0x00000035 },
1686 { 0x00009bf8, 0x00000010 },
1687 { 0x00009bfc, 0x0000001a },
1688 { 0x0000a210, 0x40806333 },
1689 { 0x0000a214, 0x00106c10 },
1690 { 0x0000a218, 0x009c4060 },
1691 { 0x0000a220, 0x018830c6 },
1692 { 0x0000a224, 0x00000400 },
1693 { 0x0000a228, 0x001a0bb5 },
1694 { 0x0000a22c, 0x00000000 },
1695 { 0x0000a234, 0x20202020 },
1696 { 0x0000a238, 0x20202020 },
1697 { 0x0000a23c, 0x13c889af },
1698 { 0x0000a240, 0x38490a20 },
1699 { 0x0000a244, 0x00007bb6 },
1700 { 0x0000a248, 0x0fff3ffc },
1701 { 0x0000a24c, 0x00000001 },
1702 { 0x0000a250, 0x0000e000 },
1703 { 0x0000a254, 0x00000000 },
1704 { 0x0000a258, 0x0cc75380 },
1705 { 0x0000a25c, 0x0f0f0f01 },
1706 { 0x0000a260, 0xdfa91f01 },
1707 { 0x0000a268, 0x00000001 },
1708 { 0x0000a26c, 0x0ebae9c6 },
1709 { 0x0000b26c, 0x0ebae9c6 },
1710 { 0x0000c26c, 0x0ebae9c6 },
1711 { 0x0000d270, 0x00820820 },
1712 { 0x0000a278, 0x1ce739ce },
1713 { 0x0000a27c, 0x050701ce },
1714 { 0x0000a338, 0x00000000 },
1715 { 0x0000a33c, 0x00000000 },
1716 { 0x0000a340, 0x00000000 },
1717 { 0x0000a344, 0x00000000 },
1718 { 0x0000a348, 0x3fffffff },
1719 { 0x0000a34c, 0x3fffffff },
1720 { 0x0000a350, 0x3fffffff },
1721 { 0x0000a354, 0x0003ffff },
1722 { 0x0000a358, 0x79bfaa03 },
1723 { 0x0000d35c, 0x07ffffef },
1724 { 0x0000d360, 0x0fffffe7 },
1725 { 0x0000d364, 0x17ffffe5 },
1726 { 0x0000d368, 0x1fffffe4 },
1727 { 0x0000d36c, 0x37ffffe3 },
1728 { 0x0000d370, 0x3fffffe3 },
1729 { 0x0000d374, 0x57ffffe3 },
1730 { 0x0000d378, 0x5fffffe2 },
1731 { 0x0000d37c, 0x7fffffe2 },
1732 { 0x0000d380, 0x7f3c7bba },
1733 { 0x0000d384, 0xf3307ff0 },
1734 { 0x0000a388, 0x0c000000 },
1735 { 0x0000a38c, 0x20202020 },
1736 { 0x0000a390, 0x20202020 },
1737 { 0x0000a394, 0x1ce739ce },
1738 { 0x0000a398, 0x000001ce },
1739 { 0x0000a39c, 0x00000001 },
1740 { 0x0000a3a0, 0x00000000 },
1741 { 0x0000a3a4, 0x00000000 },
1742 { 0x0000a3a8, 0x00000000 },
1743 { 0x0000a3ac, 0x00000000 },
1744 { 0x0000a3b0, 0x00000000 },
1745 { 0x0000a3b4, 0x00000000 },
1746 { 0x0000a3b8, 0x00000000 },
1747 { 0x0000a3bc, 0x00000000 },
1748 { 0x0000a3c0, 0x00000000 },
1749 { 0x0000a3c4, 0x00000000 },
1750 { 0x0000a3c8, 0x00000246 },
1751 { 0x0000a3cc, 0x20202020 },
1752 { 0x0000a3d0, 0x20202020 },
1753 { 0x0000a3d4, 0x20202020 },
1754 { 0x0000a3dc, 0x1ce739ce },
1755 { 0x0000a3e0, 0x000001ce },
1756};
1757
1758static const u32 ar5416Bank0_9160[][2] = {
1759 { 0x000098b0, 0x1e5795e5 },
1760 { 0x000098e0, 0x02008020 },
1761};
1762
1763static const u32 ar5416BB_RfGain_9160[][3] = {
1764 { 0x00009a00, 0x00000000, 0x00000000 },
1765 { 0x00009a04, 0x00000040, 0x00000040 },
1766 { 0x00009a08, 0x00000080, 0x00000080 },
1767 { 0x00009a0c, 0x000001a1, 0x00000141 },
1768 { 0x00009a10, 0x000001e1, 0x00000181 },
1769 { 0x00009a14, 0x00000021, 0x000001c1 },
1770 { 0x00009a18, 0x00000061, 0x00000001 },
1771 { 0x00009a1c, 0x00000168, 0x00000041 },
1772 { 0x00009a20, 0x000001a8, 0x000001a8 },
1773 { 0x00009a24, 0x000001e8, 0x000001e8 },
1774 { 0x00009a28, 0x00000028, 0x00000028 },
1775 { 0x00009a2c, 0x00000068, 0x00000068 },
1776 { 0x00009a30, 0x00000189, 0x000000a8 },
1777 { 0x00009a34, 0x000001c9, 0x00000169 },
1778 { 0x00009a38, 0x00000009, 0x000001a9 },
1779 { 0x00009a3c, 0x00000049, 0x000001e9 },
1780 { 0x00009a40, 0x00000089, 0x00000029 },
1781 { 0x00009a44, 0x00000170, 0x00000069 },
1782 { 0x00009a48, 0x000001b0, 0x00000190 },
1783 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1784 { 0x00009a50, 0x00000030, 0x00000010 },
1785 { 0x00009a54, 0x00000070, 0x00000050 },
1786 { 0x00009a58, 0x00000191, 0x00000090 },
1787 { 0x00009a5c, 0x000001d1, 0x00000151 },
1788 { 0x00009a60, 0x00000011, 0x00000191 },
1789 { 0x00009a64, 0x00000051, 0x000001d1 },
1790 { 0x00009a68, 0x00000091, 0x00000011 },
1791 { 0x00009a6c, 0x000001b8, 0x00000051 },
1792 { 0x00009a70, 0x000001f8, 0x00000198 },
1793 { 0x00009a74, 0x00000038, 0x000001d8 },
1794 { 0x00009a78, 0x00000078, 0x00000018 },
1795 { 0x00009a7c, 0x00000199, 0x00000058 },
1796 { 0x00009a80, 0x000001d9, 0x00000098 },
1797 { 0x00009a84, 0x00000019, 0x00000159 },
1798 { 0x00009a88, 0x00000059, 0x00000199 },
1799 { 0x00009a8c, 0x00000099, 0x000001d9 },
1800 { 0x00009a90, 0x000000d9, 0x00000019 },
1801 { 0x00009a94, 0x000000f9, 0x00000059 },
1802 { 0x00009a98, 0x000000f9, 0x00000099 },
1803 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1804 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1805 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1806 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1807 { 0x00009aac, 0x000000f9, 0x000000f9 },
1808 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1809 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1810 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1811 { 0x00009abc, 0x000000f9, 0x000000f9 },
1812 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1813 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1814 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1815 { 0x00009acc, 0x000000f9, 0x000000f9 },
1816 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1817 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1818 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1819 { 0x00009adc, 0x000000f9, 0x000000f9 },
1820 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1821 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1822 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1823 { 0x00009aec, 0x000000f9, 0x000000f9 },
1824 { 0x00009af0, 0x000000f9, 0x000000f9 },
1825 { 0x00009af4, 0x000000f9, 0x000000f9 },
1826 { 0x00009af8, 0x000000f9, 0x000000f9 },
1827 { 0x00009afc, 0x000000f9, 0x000000f9 },
1828};
1829
1830static const u32 ar5416Bank1_9160[][2] = {
1831 { 0x000098b0, 0x02108421 },
1832 { 0x000098ec, 0x00000008 },
1833};
1834
1835static const u32 ar5416Bank2_9160[][2] = {
1836 { 0x000098b0, 0x0e73ff17 },
1837 { 0x000098e0, 0x00000420 },
1838};
1839
1840static const u32 ar5416Bank3_9160[][3] = {
1841 { 0x000098f0, 0x01400018, 0x01c00018 },
1842};
1843
1844static const u32 ar5416Bank6_9160[][3] = {
1845 { 0x0000989c, 0x00000000, 0x00000000 },
1846 { 0x0000989c, 0x00000000, 0x00000000 },
1847 { 0x0000989c, 0x00000000, 0x00000000 },
1848 { 0x0000989c, 0x00e00000, 0x00e00000 },
1849 { 0x0000989c, 0x005e0000, 0x005e0000 },
1850 { 0x0000989c, 0x00120000, 0x00120000 },
1851 { 0x0000989c, 0x00620000, 0x00620000 },
1852 { 0x0000989c, 0x00020000, 0x00020000 },
1853 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1854 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1855 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1856 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1857 { 0x0000989c, 0x005f0000, 0x005f0000 },
1858 { 0x0000989c, 0x00870000, 0x00870000 },
1859 { 0x0000989c, 0x00f90000, 0x00f90000 },
1860 { 0x0000989c, 0x007b0000, 0x007b0000 },
1861 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1862 { 0x0000989c, 0x00f50000, 0x00f50000 },
1863 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1864 { 0x0000989c, 0x00110000, 0x00110000 },
1865 { 0x0000989c, 0x006100a8, 0x006100a8 },
1866 { 0x0000989c, 0x004210a2, 0x004210a2 },
1867 { 0x0000989c, 0x0014008f, 0x0014008f },
1868 { 0x0000989c, 0x00c40003, 0x00c40003 },
1869 { 0x0000989c, 0x003000f2, 0x003000f2 },
1870 { 0x0000989c, 0x00440016, 0x00440016 },
1871 { 0x0000989c, 0x00410040, 0x00410040 },
1872 { 0x0000989c, 0x0001805e, 0x0001805e },
1873 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1874 { 0x0000989c, 0x000000f1, 0x000000f1 },
1875 { 0x0000989c, 0x00002081, 0x00002081 },
1876 { 0x0000989c, 0x000000d4, 0x000000d4 },
1877 { 0x000098d0, 0x0000000f, 0x0010000f },
1878};
1879
1880static const u32 ar5416Bank6TPC_9160[][3] = {
1881 { 0x0000989c, 0x00000000, 0x00000000 },
1882 { 0x0000989c, 0x00000000, 0x00000000 },
1883 { 0x0000989c, 0x00000000, 0x00000000 },
1884 { 0x0000989c, 0x00e00000, 0x00e00000 },
1885 { 0x0000989c, 0x005e0000, 0x005e0000 },
1886 { 0x0000989c, 0x00120000, 0x00120000 },
1887 { 0x0000989c, 0x00620000, 0x00620000 },
1888 { 0x0000989c, 0x00020000, 0x00020000 },
1889 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1890 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1891 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1892 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1893 { 0x0000989c, 0x005f0000, 0x005f0000 },
1894 { 0x0000989c, 0x00870000, 0x00870000 },
1895 { 0x0000989c, 0x00f90000, 0x00f90000 },
1896 { 0x0000989c, 0x007b0000, 0x007b0000 },
1897 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1898 { 0x0000989c, 0x00f50000, 0x00f50000 },
1899 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1900 { 0x0000989c, 0x00110000, 0x00110000 },
1901 { 0x0000989c, 0x006100a8, 0x006100a8 },
1902 { 0x0000989c, 0x00423022, 0x00423022 },
1903 { 0x0000989c, 0x2014008f, 0x2014008f },
1904 { 0x0000989c, 0x00c40002, 0x00c40002 },
1905 { 0x0000989c, 0x003000f2, 0x003000f2 },
1906 { 0x0000989c, 0x00440016, 0x00440016 },
1907 { 0x0000989c, 0x00410040, 0x00410040 },
1908 { 0x0000989c, 0x0001805e, 0x0001805e },
1909 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1910 { 0x0000989c, 0x000000e1, 0x000000e1 },
1911 { 0x0000989c, 0x00007080, 0x00007080 },
1912 { 0x0000989c, 0x000000d4, 0x000000d4 },
1913 { 0x000098d0, 0x0000000f, 0x0010000f },
1914};
1915
1916static const u32 ar5416Bank7_9160[][2] = {
1917 { 0x0000989c, 0x00000500 },
1918 { 0x0000989c, 0x00000800 },
1919 { 0x000098cc, 0x0000000e },
1920};
1921
1922static u32 ar5416Addac_9160[][2] = {
1923 {0x0000989c, 0x00000000 },
1924 {0x0000989c, 0x00000000 },
1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 },
1927 {0x0000989c, 0x00000000 },
1928 {0x0000989c, 0x00000000 },
1929 {0x0000989c, 0x000000c0 },
1930 {0x0000989c, 0x00000018 },
1931 {0x0000989c, 0x00000004 },
1932 {0x0000989c, 0x00000000 },
1933 {0x0000989c, 0x00000000 },
1934 {0x0000989c, 0x00000000 },
1935 {0x0000989c, 0x00000000 },
1936 {0x0000989c, 0x00000000 },
1937 {0x0000989c, 0x00000000 },
1938 {0x0000989c, 0x00000000 },
1939 {0x0000989c, 0x00000000 },
1940 {0x0000989c, 0x00000000 },
1941 {0x0000989c, 0x00000000 },
1942 {0x0000989c, 0x00000000 },
1943 {0x0000989c, 0x00000000 },
1944 {0x0000989c, 0x000000c0 },
1945 {0x0000989c, 0x00000019 },
1946 {0x0000989c, 0x00000004 },
1947 {0x0000989c, 0x00000000 },
1948 {0x0000989c, 0x00000000 },
1949 {0x0000989c, 0x00000000 },
1950 {0x0000989c, 0x00000004 },
1951 {0x0000989c, 0x00000003 },
1952 {0x0000989c, 0x00000008 },
1953 {0x0000989c, 0x00000000 },
1954 {0x000098cc, 0x00000000 },
1955};
1956
1957static u32 ar5416Addac_91601_1[][2] = {
1958 {0x0000989c, 0x00000000 },
1959 {0x0000989c, 0x00000000 },
1960 {0x0000989c, 0x00000000 },
1961 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 },
1963 {0x0000989c, 0x00000000 },
1964 {0x0000989c, 0x000000c0 },
1965 {0x0000989c, 0x00000018 },
1966 {0x0000989c, 0x00000004 },
1967 {0x0000989c, 0x00000000 },
1968 {0x0000989c, 0x00000000 },
1969 {0x0000989c, 0x00000000 },
1970 {0x0000989c, 0x00000000 },
1971 {0x0000989c, 0x00000000 },
1972 {0x0000989c, 0x00000000 },
1973 {0x0000989c, 0x00000000 },
1974 {0x0000989c, 0x00000000 },
1975 {0x0000989c, 0x00000000 },
1976 {0x0000989c, 0x00000000 },
1977 {0x0000989c, 0x00000000 },
1978 {0x0000989c, 0x00000000 },
1979 {0x0000989c, 0x000000c0 },
1980 {0x0000989c, 0x00000019 },
1981 {0x0000989c, 0x00000004 },
1982 {0x0000989c, 0x00000000 },
1983 {0x0000989c, 0x00000000 },
1984 {0x0000989c, 0x00000000 },
1985 {0x0000989c, 0x00000000 },
1986 {0x0000989c, 0x00000000 },
1987 {0x0000989c, 0x00000000 },
1988 {0x0000989c, 0x00000000 },
1989 {0x000098cc, 0x00000000 },
1990};
1991
1992/* XXX 9280 1 */
1993static const u32 ar9280Modes_9280[][6] = { 20static const u32 ar9280Modes_9280[][6] = {
1994 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1995 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2766 { 0x00008258, 0x00000000 }, 793 { 0x00008258, 0x00000000 },
2767 { 0x0000825c, 0x400000ff }, 794 { 0x0000825c, 0x400000ff },
2768 { 0x00008260, 0x00080922 }, 795 { 0x00008260, 0x00080922 },
2769 { 0x00008264, 0xa8a00010 }, 796 { 0x00008264, 0x88a00010 },
2770 { 0x00008270, 0x00000000 }, 797 { 0x00008270, 0x00000000 },
2771 { 0x00008274, 0x40000000 }, 798 { 0x00008274, 0x40000000 },
2772 { 0x00008278, 0x003e4180 }, 799 { 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
3441}; 1468};
3442 1469
3443/* AR9285 Revsion 10*/ 1470/* AR9285 Revsion 10*/
3444static const u_int32_t ar9285Modes_9285[][6] = { 1471static const u32 ar9285Modes_9285[][6] = {
3445 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 1472 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
3446 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 1473 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
3447 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 1474 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
3763 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 1790 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
3764}; 1791};
3765 1792
3766static const u_int32_t ar9285Common_9285[][2] = { 1793static const u32 ar9285Common_9285[][2] = {
3767 { 0x0000000c, 0x00000000 }, 1794 { 0x0000000c, 0x00000000 },
3768 { 0x00000030, 0x00020045 }, 1795 { 0x00000030, 0x00020045 },
3769 { 0x00000034, 0x00000005 }, 1796 { 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
3936 { 0x00008258, 0x00000000 }, 1963 { 0x00008258, 0x00000000 },
3937 { 0x0000825c, 0x400000ff }, 1964 { 0x0000825c, 0x400000ff },
3938 { 0x00008260, 0x00080922 }, 1965 { 0x00008260, 0x00080922 },
3939 { 0x00008264, 0xa8a00010 }, 1966 { 0x00008264, 0x88a00010 },
3940 { 0x00008270, 0x00000000 }, 1967 { 0x00008270, 0x00000000 },
3941 { 0x00008274, 0x40000000 }, 1968 { 0x00008274, 0x40000000 },
3942 { 0x00008278, 0x003e4180 }, 1969 { 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
4096 { 0x00007870, 0x10142c00 }, 2123 { 0x00007870, 0x10142c00 },
4097}; 2124};
4098 2125
4099static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = { 2126static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
4100 {0x00004040, 0x9248fd00 }, 2127 {0x00004040, 0x9248fd00 },
4101 {0x00004040, 0x24924924 }, 2128 {0x00004040, 0x24924924 },
4102 {0x00004040, 0xa8000019 }, 2129 {0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
4109 {0x00004044, 0x00000000 }, 2136 {0x00004044, 0x00000000 },
4110}; 2137};
4111 2138
4112static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = { 2139static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
4113 {0x00004040, 0x9248fd00 }, 2140 {0x00004040, 0x9248fd00 },
4114 {0x00004040, 0x24924924 }, 2141 {0x00004040, 0x24924924 },
4115 {0x00004040, 0xa8000019 }, 2142 {0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
4123}; 2150};
4124 2151
4125/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */ 2152/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
4126static const u_int32_t ar9285Modes_9285_1_2[][6] = { 2153static const u32 ar9285Modes_9285_1_2[][6] = {
4127 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2154 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4128 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 2155 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
4129 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 2156 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4429 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2456 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
4430}; 2457};
4431 2458
4432static const u_int32_t ar9285Common_9285_1_2[][2] = { 2459static const u32 ar9285Common_9285_1_2[][2] = {
4433 { 0x0000000c, 0x00000000 }, 2460 { 0x0000000c, 0x00000000 },
4434 { 0x00000030, 0x00020045 }, 2461 { 0x00000030, 0x00020045 },
4435 { 0x00000034, 0x00000005 }, 2462 { 0x00000034, 0x00000005 },
@@ -4748,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4748 { 0x00007870, 0x10142c00 }, 2775 { 0x00007870, 0x10142c00 },
4749}; 2776};
4750 2777
4751static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = { 2778static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4752 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2779 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4753 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2780 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4754 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, 2781 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4789 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 2816 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
4790}; 2817};
4791 2818
4792static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = { 2819static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4793 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2820 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4794 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2821 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4795 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 2822 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4830,7 +2857,7 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4830 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 2857 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4831}; 2858};
4832 2859
4833static const u_int32_t ar9285Modes_XE2_0_normal_power[][6] = { 2860static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
4834 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2861 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4835 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 2862 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
4836 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, 2863 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
@@ -4870,7 +2897,7 @@ static const u_int32_t ar9285Modes_XE2_0_normal_power[][6] = {
4870 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 2897 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4871}; 2898};
4872 2899
4873static const u_int32_t ar9285Modes_XE2_0_high_power[][6] = { 2900static const u32 ar9285Modes_XE2_0_high_power[][6] = {
4874 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2901 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4875 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, 2902 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
4876 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, 2903 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
@@ -4910,7 +2937,7 @@ static const u_int32_t ar9285Modes_XE2_0_high_power[][6] = {
4910 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 2937 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
4911}; 2938};
4912 2939
4913static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { 2940static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4914 {0x00004040, 0x9248fd00 }, 2941 {0x00004040, 0x9248fd00 },
4915 {0x00004040, 0x24924924 }, 2942 {0x00004040, 0x24924924 },
4916 {0x00004040, 0xa8000019 }, 2943 {0x00004040, 0xa8000019 },
@@ -4923,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4923 {0x00004044, 0x00000000 }, 2950 {0x00004044, 0x00000000 },
4924}; 2951};
4925 2952
4926static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = { 2953static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
4927 {0x00004040, 0x9248fd00 }, 2954 {0x00004040, 0x9248fd00 },
4928 {0x00004040, 0x24924924 }, 2955 {0x00004040, 0x24924924 },
4929 {0x00004040, 0xa8000019 }, 2956 {0x00004040, 0xa8000019 },
@@ -4937,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
4937}; 2964};
4938 2965
4939/* AR9287 Revision 10 */ 2966/* AR9287 Revision 10 */
4940static const u_int32_t ar9287Modes_9287_1_0[][6] = { 2967static const u32 ar9287Modes_9287_1_0[][6] = {
4941 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2968 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4942 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, 2969 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
4943 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, 2970 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4984,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
4984 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3011 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4985}; 3012};
4986 3013
4987static const u_int32_t ar9287Common_9287_1_0[][2] = { 3014static const u32 ar9287Common_9287_1_0[][2] = {
4988 { 0x0000000c, 0x00000000 }, 3015 { 0x0000000c, 0x00000000 },
4989 { 0x00000030, 0x00020015 }, 3016 { 0x00000030, 0x00020015 },
4990 { 0x00000034, 0x00000005 }, 3017 { 0x00000034, 0x00000005 },
@@ -5158,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
5158 { 0x00008258, 0x00000000 }, 3185 { 0x00008258, 0x00000000 },
5159 { 0x0000825c, 0x400000ff }, 3186 { 0x0000825c, 0x400000ff },
5160 { 0x00008260, 0x00080922 }, 3187 { 0x00008260, 0x00080922 },
5161 { 0x00008264, 0xa8a00010 }, 3188 { 0x00008264, 0x88a00010 },
5162 { 0x00008270, 0x00000000 }, 3189 { 0x00008270, 0x00000000 },
5163 { 0x00008274, 0x40000000 }, 3190 { 0x00008274, 0x40000000 },
5164 { 0x00008278, 0x003e4180 }, 3191 { 0x00008278, 0x003e4180 },
@@ -5355,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
5355 { 0x000078b8, 0x2a850160 }, 3382 { 0x000078b8, 0x2a850160 },
5356}; 3383};
5357 3384
5358static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = { 3385static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
5359 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3386 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5360 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3387 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
5361 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, 3388 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5405,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
5405}; 3432};
5406 3433
5407 3434
5408static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = { 3435static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
5409 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3436 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5410 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 3437 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
5411 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 3438 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5667,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
5667 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, 3694 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
5668}; 3695};
5669 3696
5670static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = { 3697static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
5671 {0x00004040, 0x9248fd00 }, 3698 {0x00004040, 0x9248fd00 },
5672 {0x00004040, 0x24924924 }, 3699 {0x00004040, 0x24924924 },
5673 {0x00004040, 0xa8000019 }, 3700 {0x00004040, 0xa8000019 },
@@ -5680,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
5680 {0x00004044, 0x00000000 }, 3707 {0x00004044, 0x00000000 },
5681}; 3708};
5682 3709
5683static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = { 3710static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
5684 {0x00004040, 0x9248fd00 }, 3711 {0x00004040, 0x9248fd00 },
5685 {0x00004040, 0x24924924 }, 3712 {0x00004040, 0x24924924 },
5686 {0x00004040, 0xa8000019 }, 3713 {0x00004040, 0xa8000019 },
@@ -5695,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
5695 3722
5696/* AR9287 Revision 11 */ 3723/* AR9287 Revision 11 */
5697 3724
5698static const u_int32_t ar9287Modes_9287_1_1[][6] = { 3725static const u32 ar9287Modes_9287_1_1[][6] = {
5699 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3726 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5700 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, 3727 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
5701 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, 3728 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5742,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
5742 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3769 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
5743}; 3770};
5744 3771
5745static const u_int32_t ar9287Common_9287_1_1[][2] = { 3772static const u32 ar9287Common_9287_1_1[][2] = {
5746 { 0x0000000c, 0x00000000 }, 3773 { 0x0000000c, 0x00000000 },
5747 { 0x00000030, 0x00020015 }, 3774 { 0x00000030, 0x00020015 },
5748 { 0x00000034, 0x00000005 }, 3775 { 0x00000034, 0x00000005 },
@@ -6112,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6112 4139
6113/* 4140/*
6114 * For Japanese regulatory requirements, 2484 MHz requires the following three 4141 * For Japanese regulatory requirements, 2484 MHz requires the following three
6115 * registers be programmed differently from the channel between 2412 and 2472 MHz. 4142 * registers be programmed differently from the channel between 2412 and
4143 * 2472 MHz.
6116 */ 4144 */
6117static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = { 4145static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
6118 { 0x0000a1f4, 0x00fffeff }, 4146 { 0x0000a1f4, 0x00fffeff },
6119 { 0x0000a1f8, 0x00f5f9ff }, 4147 { 0x0000a1f8, 0x00f5f9ff },
6120 { 0x0000a1fc, 0xb79f6427 }, 4148 { 0x0000a1fc, 0xb79f6427 },
6121}; 4149};
6122 4150
6123static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = { 4151static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
6124 { 0x0000a1f4, 0x00000000 }, 4152 { 0x0000a1f4, 0x00000000 },
6125 { 0x0000a1f8, 0xefff0301 }, 4153 { 0x0000a1f8, 0xefff0301 },
6126 { 0x0000a1fc, 0xca9228ee }, 4154 { 0x0000a1fc, 0xca9228ee },
6127}; 4155};
6128 4156
6129static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = { 4157static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
6130 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 4158 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6131 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 4159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
6132 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, 4160 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6175,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
6175 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, 4203 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
6176}; 4204};
6177 4205
6178static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = { 4206static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
6179 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 4207 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6180 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 4208 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
6181 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 4209 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6437,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
6437 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, 4465 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
6438}; 4466};
6439 4467
6440static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = { 4468static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
6441 {0x00004040, 0x9248fd00 }, 4469 {0x00004040, 0x9248fd00 },
6442 {0x00004040, 0x24924924 }, 4470 {0x00004040, 0x24924924 },
6443 {0x00004040, 0xa8000019 }, 4471 {0x00004040, 0xa8000019 },
@@ -6450,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
6450 {0x00004044, 0x00000000 }, 4478 {0x00004044, 0x00000000 },
6451}; 4479};
6452 4480
6453static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = { 4481static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6454 {0x00004040, 0x9248fd00 }, 4482 {0x00004040, 0x9248fd00 },
6455 {0x00004040, 0x24924924 }, 4483 {0x00004040, 0x24924924 },
6456 {0x00004040, 0xa8000019 }, 4484 {0x00004040, 0xa8000019 },
@@ -6465,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6465 4493
6466 4494
6467/* AR9271 initialization values automaticaly created: 06/04/09 */ 4495/* AR9271 initialization values automaticaly created: 06/04/09 */
6468static const u_int32_t ar9271Modes_9271[][6] = { 4496static const u32 ar9271Modes_9271[][6] = {
6469 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 4497 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
6470 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 4498 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
6471 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 4499 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6771,7 +4799,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6771 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 4799 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
6772}; 4800};
6773 4801
6774static const u_int32_t ar9271Common_9271[][2] = { 4802static const u32 ar9271Common_9271[][2] = {
6775 { 0x0000000c, 0x00000000 }, 4803 { 0x0000000c, 0x00000000 },
6776 { 0x00000030, 0x00020045 }, 4804 { 0x00000030, 0x00020045 },
6777 { 0x00000034, 0x00000005 }, 4805 { 0x00000034, 0x00000005 },
@@ -6945,7 +4973,7 @@ static const u_int32_t ar9271Common_9271[][2] = {
6945 { 0x00008258, 0x00000000 }, 4973 { 0x00008258, 0x00000000 },
6946 { 0x0000825c, 0x400000ff }, 4974 { 0x0000825c, 0x400000ff },
6947 { 0x00008260, 0x00080922 }, 4975 { 0x00008260, 0x00080922 },
6948 { 0x00008264, 0xa8a00010 }, 4976 { 0x00008264, 0x88a00010 },
6949 { 0x00008270, 0x00000000 }, 4977 { 0x00008270, 0x00000000 },
6950 { 0x00008274, 0x40000000 }, 4978 { 0x00008274, 0x40000000 },
6951 { 0x00008278, 0x003e4180 }, 4979 { 0x00008278, 0x003e4180 },
@@ -7099,24 +5127,24 @@ static const u_int32_t ar9271Common_9271[][2] = {
7099 { 0x0000d384, 0xf3307ff0 }, 5127 { 0x0000d384, 0xf3307ff0 },
7100}; 5128};
7101 5129
7102static const u_int32_t ar9271Common_normal_cck_fir_coeff_9271[][2] = { 5130static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
7103 { 0x0000a1f4, 0x00fffeff }, 5131 { 0x0000a1f4, 0x00fffeff },
7104 { 0x0000a1f8, 0x00f5f9ff }, 5132 { 0x0000a1f8, 0x00f5f9ff },
7105 { 0x0000a1fc, 0xb79f6427 }, 5133 { 0x0000a1fc, 0xb79f6427 },
7106}; 5134};
7107 5135
7108static const u_int32_t ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = { 5136static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
7109 { 0x0000a1f4, 0x00000000 }, 5137 { 0x0000a1f4, 0x00000000 },
7110 { 0x0000a1f8, 0xefff0301 }, 5138 { 0x0000a1f8, 0xefff0301 },
7111 { 0x0000a1fc, 0xca9228ee }, 5139 { 0x0000a1fc, 0xca9228ee },
7112}; 5140};
7113 5141
7114static const u_int32_t ar9271Modes_9271_1_0_only[][6] = { 5142static const u32 ar9271Modes_9271_1_0_only[][6] = {
7115 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, 5143 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
7116 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 5144 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
7117}; 5145};
7118 5146
7119static const u_int32_t ar9271Modes_9271_ANI_reg[][6] = { 5147static const u32 ar9271Modes_9271_ANI_reg[][6] = {
7120 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 5148 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
7121 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, 5149 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
7122 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 5150 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
@@ -7127,7 +5155,7 @@ static const u_int32_t ar9271Modes_9271_ANI_reg[][6] = {
7127 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 5155 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
7128}; 5156};
7129 5157
7130static const u_int32_t ar9271Modes_normal_power_tx_gain_9271[][6] = { 5158static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
7131 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 5159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
7132 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 5160 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
7133 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, 5161 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
@@ -7163,7 +5191,7 @@ static const u_int32_t ar9271Modes_normal_power_tx_gain_9271[][6] = {
7163 { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd }, 5191 { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
7164}; 5192};
7165 5193
7166static const u_int32_t ar9271Modes_high_power_tx_gain_9271[][6] = { 5194static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
7167 { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 }, 5195 { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
7168 { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 }, 5196 { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
7169 { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 }, 5197 { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
@@ -7198,3 +5226,5 @@ static const u_int32_t ar9271Modes_high_power_tx_gain_9271[][6] = {
7198 { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 }, 5226 { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
7199 { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 }, 5227 { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
7200}; 5228};
5229
5230#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 000000000000..2be20d2070c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18
19#define AR_BufLen 0x00000fff
20
21static void ar9002_hw_rx_enable(struct ath_hw *ah)
22{
23 REG_WRITE(ah, AR_CR, AR_CR_RXE);
24}
25
26static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
27{
28 ((struct ath_desc*) ds)->ds_link = ds_link;
29}
30
31static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
32{
33 *ds_link = &((struct ath_desc *)ds)->ds_link;
34}
35
36static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
37{
38 u32 isr = 0;
39 u32 mask2 = 0;
40 struct ath9k_hw_capabilities *pCap = &ah->caps;
41 u32 sync_cause = 0;
42 bool fatal_int = false;
43 struct ath_common *common = ath9k_hw_common(ah);
44
45 if (!AR_SREV_9100(ah)) {
46 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
47 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
48 == AR_RTC_STATUS_ON) {
49 isr = REG_READ(ah, AR_ISR);
50 }
51 }
52
53 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
54 AR_INTR_SYNC_DEFAULT;
55
56 *masked = 0;
57
58 if (!isr && !sync_cause)
59 return false;
60 } else {
61 *masked = 0;
62 isr = REG_READ(ah, AR_ISR);
63 }
64
65 if (isr) {
66 if (isr & AR_ISR_BCNMISC) {
67 u32 isr2;
68 isr2 = REG_READ(ah, AR_ISR_S2);
69 if (isr2 & AR_ISR_S2_TIM)
70 mask2 |= ATH9K_INT_TIM;
71 if (isr2 & AR_ISR_S2_DTIM)
72 mask2 |= ATH9K_INT_DTIM;
73 if (isr2 & AR_ISR_S2_DTIMSYNC)
74 mask2 |= ATH9K_INT_DTIMSYNC;
75 if (isr2 & (AR_ISR_S2_CABEND))
76 mask2 |= ATH9K_INT_CABEND;
77 if (isr2 & AR_ISR_S2_GTT)
78 mask2 |= ATH9K_INT_GTT;
79 if (isr2 & AR_ISR_S2_CST)
80 mask2 |= ATH9K_INT_CST;
81 if (isr2 & AR_ISR_S2_TSFOOR)
82 mask2 |= ATH9K_INT_TSFOOR;
83 }
84
85 isr = REG_READ(ah, AR_ISR_RAC);
86 if (isr == 0xffffffff) {
87 *masked = 0;
88 return false;
89 }
90
91 *masked = isr & ATH9K_INT_COMMON;
92
93 if (ah->config.rx_intr_mitigation) {
94 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
95 *masked |= ATH9K_INT_RX;
96 }
97
98 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
99 *masked |= ATH9K_INT_RX;
100 if (isr &
101 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
102 AR_ISR_TXEOL)) {
103 u32 s0_s, s1_s;
104
105 *masked |= ATH9K_INT_TX;
106
107 s0_s = REG_READ(ah, AR_ISR_S0_S);
108 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
109 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
110
111 s1_s = REG_READ(ah, AR_ISR_S1_S);
112 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
113 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
114 }
115
116 if (isr & AR_ISR_RXORN) {
117 ath_print(common, ATH_DBG_INTERRUPT,
118 "receive FIFO overrun interrupt\n");
119 }
120
121 if (!AR_SREV_9100(ah)) {
122 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
123 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
124 if (isr5 & AR_ISR_S5_TIM_TIMER)
125 *masked |= ATH9K_INT_TIM_TIMER;
126 }
127 }
128
129 *masked |= mask2;
130 }
131
132 if (AR_SREV_9100(ah))
133 return true;
134
135 if (isr & AR_ISR_GENTMR) {
136 u32 s5_s;
137
138 s5_s = REG_READ(ah, AR_ISR_S5_S);
139 if (isr & AR_ISR_GENTMR) {
140 ah->intr_gen_timer_trigger =
141 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
142
143 ah->intr_gen_timer_thresh =
144 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
145
146 if (ah->intr_gen_timer_trigger)
147 *masked |= ATH9K_INT_GENTIMER;
148
149 }
150 }
151
152 if (sync_cause) {
153 fatal_int =
154 (sync_cause &
155 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
156 ? true : false;
157
158 if (fatal_int) {
159 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
160 ath_print(common, ATH_DBG_ANY,
161 "received PCI FATAL interrupt\n");
162 }
163 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
164 ath_print(common, ATH_DBG_ANY,
165 "received PCI PERR interrupt\n");
166 }
167 *masked |= ATH9K_INT_FATAL;
168 }
169 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
170 ath_print(common, ATH_DBG_INTERRUPT,
171 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
172 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
173 REG_WRITE(ah, AR_RC, 0);
174 *masked |= ATH9K_INT_FATAL;
175 }
176 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
177 ath_print(common, ATH_DBG_INTERRUPT,
178 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
179 }
180
181 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
182 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
183 }
184
185 return true;
186}
187
188static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
189 bool is_firstseg, bool is_lastseg,
190 const void *ds0, dma_addr_t buf_addr,
191 unsigned int qcu)
192{
193 struct ar5416_desc *ads = AR5416DESC(ds);
194
195 ads->ds_data = buf_addr;
196
197 if (is_firstseg) {
198 ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
199 } else if (is_lastseg) {
200 ads->ds_ctl0 = 0;
201 ads->ds_ctl1 = seglen;
202 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
203 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
204 } else {
205 ads->ds_ctl0 = 0;
206 ads->ds_ctl1 = seglen | AR_TxMore;
207 ads->ds_ctl2 = 0;
208 ads->ds_ctl3 = 0;
209 }
210 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
211 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
212 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
213 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
214 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
215}
216
217static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
218 struct ath_tx_status *ts)
219{
220 struct ar5416_desc *ads = AR5416DESC(ds);
221
222 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
223 return -EINPROGRESS;
224
225 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
226 ts->ts_tstamp = ads->AR_SendTimestamp;
227 ts->ts_status = 0;
228 ts->ts_flags = 0;
229
230 if (ads->ds_txstatus1 & AR_FrmXmitOK)
231 ts->ts_status |= ATH9K_TX_ACKED;
232 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
233 ts->ts_status |= ATH9K_TXERR_XRETRY;
234 if (ads->ds_txstatus1 & AR_Filtered)
235 ts->ts_status |= ATH9K_TXERR_FILT;
236 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
237 ts->ts_status |= ATH9K_TXERR_FIFO;
238 ath9k_hw_updatetxtriglevel(ah, true);
239 }
240 if (ads->ds_txstatus9 & AR_TxOpExceeded)
241 ts->ts_status |= ATH9K_TXERR_XTXOP;
242 if (ads->ds_txstatus1 & AR_TxTimerExpired)
243 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
244
245 if (ads->ds_txstatus1 & AR_DescCfgErr)
246 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
247 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
248 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
249 ath9k_hw_updatetxtriglevel(ah, true);
250 }
251 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
252 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
253 ath9k_hw_updatetxtriglevel(ah, true);
254 }
255 if (ads->ds_txstatus0 & AR_TxBaStatus) {
256 ts->ts_flags |= ATH9K_TX_BA;
257 ts->ba_low = ads->AR_BaBitmapLow;
258 ts->ba_high = ads->AR_BaBitmapHigh;
259 }
260
261 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
262 switch (ts->ts_rateindex) {
263 case 0:
264 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
265 break;
266 case 1:
267 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
268 break;
269 case 2:
270 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
271 break;
272 case 3:
273 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
274 break;
275 }
276
277 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
278 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
279 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
280 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
281 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
282 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
283 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
284 ts->evm0 = ads->AR_TxEVM0;
285 ts->evm1 = ads->AR_TxEVM1;
286 ts->evm2 = ads->AR_TxEVM2;
287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
290 ts->ts_antenna = 0;
291
292 return 0;
293}
294
295static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
296 u32 pktLen, enum ath9k_pkt_type type,
297 u32 txPower, u32 keyIx,
298 enum ath9k_key_type keyType, u32 flags)
299{
300 struct ar5416_desc *ads = AR5416DESC(ds);
301
302 txPower += ah->txpower_indexoffset;
303 if (txPower > 63)
304 txPower = 63;
305
306 ads->ds_ctl0 = (pktLen & AR_FrameLen)
307 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
308 | SM(txPower, AR_XmitPower)
309 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
310 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
311 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
312 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
313
314 ads->ds_ctl1 =
315 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
316 | SM(type, AR_FrameType)
317 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
318 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
319 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
320
321 ads->ds_ctl6 = SM(keyType, AR_EncrType);
322
323 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
324 ads->ds_ctl8 = 0;
325 ads->ds_ctl9 = 0;
326 ads->ds_ctl10 = 0;
327 ads->ds_ctl11 = 0;
328 }
329}
330
331static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
332 void *lastds,
333 u32 durUpdateEn, u32 rtsctsRate,
334 u32 rtsctsDuration,
335 struct ath9k_11n_rate_series series[],
336 u32 nseries, u32 flags)
337{
338 struct ar5416_desc *ads = AR5416DESC(ds);
339 struct ar5416_desc *last_ads = AR5416DESC(lastds);
340 u32 ds_ctl0;
341
342 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
343 ds_ctl0 = ads->ds_ctl0;
344
345 if (flags & ATH9K_TXDESC_RTSENA) {
346 ds_ctl0 &= ~AR_CTSEnable;
347 ds_ctl0 |= AR_RTSEnable;
348 } else {
349 ds_ctl0 &= ~AR_RTSEnable;
350 ds_ctl0 |= AR_CTSEnable;
351 }
352
353 ads->ds_ctl0 = ds_ctl0;
354 } else {
355 ads->ds_ctl0 =
356 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
357 }
358
359 ads->ds_ctl2 = set11nTries(series, 0)
360 | set11nTries(series, 1)
361 | set11nTries(series, 2)
362 | set11nTries(series, 3)
363 | (durUpdateEn ? AR_DurUpdateEna : 0)
364 | SM(0, AR_BurstDur);
365
366 ads->ds_ctl3 = set11nRate(series, 0)
367 | set11nRate(series, 1)
368 | set11nRate(series, 2)
369 | set11nRate(series, 3);
370
371 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
372 | set11nPktDurRTSCTS(series, 1);
373
374 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
375 | set11nPktDurRTSCTS(series, 3);
376
377 ads->ds_ctl7 = set11nRateFlags(series, 0)
378 | set11nRateFlags(series, 1)
379 | set11nRateFlags(series, 2)
380 | set11nRateFlags(series, 3)
381 | SM(rtsctsRate, AR_RTSCTSRate);
382 last_ads->ds_ctl2 = ads->ds_ctl2;
383 last_ads->ds_ctl3 = ads->ds_ctl3;
384}
385
386static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
387 u32 aggrLen)
388{
389 struct ar5416_desc *ads = AR5416DESC(ds);
390
391 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
392 ads->ds_ctl6 &= ~AR_AggrLen;
393 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
394}
395
396static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
397 u32 numDelims)
398{
399 struct ar5416_desc *ads = AR5416DESC(ds);
400 unsigned int ctl6;
401
402 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
403
404 ctl6 = ads->ds_ctl6;
405 ctl6 &= ~AR_PadDelim;
406 ctl6 |= SM(numDelims, AR_PadDelim);
407 ads->ds_ctl6 = ctl6;
408}
409
410static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
411{
412 struct ar5416_desc *ads = AR5416DESC(ds);
413
414 ads->ds_ctl1 |= AR_IsAggr;
415 ads->ds_ctl1 &= ~AR_MoreAggr;
416 ads->ds_ctl6 &= ~AR_PadDelim;
417}
418
419static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
420{
421 struct ar5416_desc *ads = AR5416DESC(ds);
422
423 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
424}
425
426static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
427 u32 burstDuration)
428{
429 struct ar5416_desc *ads = AR5416DESC(ds);
430
431 ads->ds_ctl2 &= ~AR_BurstDur;
432 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
433}
434
435static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
436 u32 vmf)
437{
438 struct ar5416_desc *ads = AR5416DESC(ds);
439
440 if (vmf)
441 ads->ds_ctl0 |= AR_VirtMoreFrag;
442 else
443 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
444}
445
446void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
447 u32 size, u32 flags)
448{
449 struct ar5416_desc *ads = AR5416DESC(ds);
450 struct ath9k_hw_capabilities *pCap = &ah->caps;
451
452 ads->ds_ctl1 = size & AR_BufLen;
453 if (flags & ATH9K_RXDESC_INTREQ)
454 ads->ds_ctl1 |= AR_RxIntrReq;
455
456 ads->ds_rxstatus8 &= ~AR_RxDone;
457 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
458 memset(&(ads->u), 0, sizeof(ads->u));
459}
460EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
461
462void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
463{
464 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
465
466 ops->rx_enable = ar9002_hw_rx_enable;
467 ops->set_desc_link = ar9002_hw_set_desc_link;
468 ops->get_desc_link = ar9002_hw_get_desc_link;
469 ops->get_isr = ar9002_hw_get_isr;
470 ops->fill_txdesc = ar9002_hw_fill_txdesc;
471 ops->proc_txdesc = ar9002_hw_proc_txdesc;
472 ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
473 ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
474 ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
475 ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
476 ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
477 ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
478 ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
479 ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
480}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 000000000000..ed314e89bfe1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
20 * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9271 - 11n single-band 1x1 MIMO for USB
40 */
41
42#include "hw.h"
43#include "ar9002_phy.h"
44
/**
 * ar9002_hw_set_channel - set channel on single-chip device
 * @ah: atheros hardware structure
 * @chan: the channel to program the synthesizer to
 *
 * This is the function to change channel on single-chip devices, that is
 * all devices after ar9280.
 *
 * This function takes the channel value in MHz and sets
 * hardware channel value. Assumes writes have been enabled to analog bus.
 *
 * Actual Expression,
 *
 * For 2GHz channel,
 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
 * (freq_ref = 40MHz)
 *
 * For 5GHz channel,
 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
 * (freq_ref = 40MHz/(24>>amodeRefSel))
 *
 * Returns 0 (this path cannot fail).
 */
static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
	u16 bMode, fracMode, aModeRefSel = 0;
	u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
	struct chan_centers centers;
	u32 refDivA = 24;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	/* Preserve only the top two (non-channel) bits of the synth reg. */
	reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
	reg32 &= 0xc0000000;

	if (freq < 4800) { /* 2 GHz, fractional mode */
		u32 txctl;
		int regWrites = 0;

		bMode = 1;
		fracMode = 1;
		aModeRefSel = 0;
		channelSel = CHANSEL_2G(freq);

		if (AR_SREV_9287_11_OR_LATER(ah)) {
			if (freq == 2484) {
				/* Enable channel spreading for channel 14 */
				REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
						1, regWrites);
			} else {
				REG_WRITE_ARRAY(&ah->iniCckfirNormal,
						1, regWrites);
			}
		} else {
			/* Older parts toggle Japan spreading via a CTRL bit. */
			txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
			if (freq == 2484) {
				/* Enable channel spreading for channel 14 */
				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
					  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
			} else {
				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
					  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
			}
		}
	} else {
		bMode = 0;
		fracMode = 0;

		switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
		case 0:
			/* Integer mode when the frequency divides evenly. */
			if ((freq % 20) == 0)
				aModeRefSel = 3;
			else if ((freq % 10) == 0)
				aModeRefSel = 2;
			if (aModeRefSel)
				break;
			/* else fall through: use fractional mode below */
		case 1:
		default:
			aModeRefSel = 0;
			/*
			 * Enable 2G (fractional) mode for channels
			 * which are 5MHz spaced.
			 */
			fracMode = 1;
			refDivA = 1;
			channelSel = CHANSEL_5G(freq);

			/* RefDivA setting */
			REG_RMW_FIELD(ah, AR_AN_SYNTH9,
				      AR_AN_SYNTH9_REFDIVA, refDivA);

		}

		if (!fracMode) {
			/* Pack chansel[8:0] and chanfrac into one field. */
			ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
			channelSel = ndiv & 0x1ff;
			channelFrac = (ndiv & 0xfffffe00) * 2;
			channelSel = (channelSel << 17) | channelFrac;
		}
	}

	/* Compose mode bits + reference select + channel into the synth reg. */
	reg32 = reg32 |
	    (bMode << 29) |
	    (fracMode << 28) | (aModeRefSel << 26) | (channelSel);

	REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);

	ah->curchan = chan;
	ah->curchan_rad_index = -1;

	return 0;
}
156
/**
 * ar9002_hw_spur_mitigate - convert baseband spur frequency
 * @ah: atheros hardware structure
 * @chan: the channel currently being configured
 *
 * For single-chip solutions. Converts to baseband spur frequency given the
 * input channel frequency and compute register settings below.
 *
 * Scans the EEPROM spur table for a spur close enough to the synthesized
 * center frequency, then programs the spur-RSSI/filter registers and the
 * pilot/channel/viterbi bin masks around that spur.
 */
static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
				    struct ath9k_channel *chan)
{
	int bb_spur = AR_NO_SPUR;
	int freq;
	int bin, cur_bin;
	int bb_spur_off, spur_subchannel_sd;
	int spur_freq_sd;
	int spur_delta_phase;
	int denominator;
	int upper, lower, cur_vit_mask;
	int tmp, newVal;
	int i;
	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
	};
	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
	};
	int inc[4] = { 0, 100, 0, 0 };
	struct chan_centers centers;

	/* Negative-side and positive-side viterbi mask amounts, by bin/100. */
	int8_t mask_m[123];
	int8_t mask_p[123];
	int8_t mask_amt;
	int tmp_mask;
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

	memset(&mask_m, 0, sizeof(int8_t) * 123);
	memset(&mask_p, 0, sizeof(int8_t) * 123);

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	ah->config.spurmode = SPUR_ENABLE_EEPROM;
	/* Find the first EEPROM spur within range of the current channel. */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);

		if (is2GHz)
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
		else
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;

		if (AR_NO_SPUR == cur_bb_spur)
			break;
		/* Express the spur relative to the tuned center frequency. */
		cur_bb_spur = cur_bb_spur - freq;

		if (IS_CHAN_HT40(chan)) {
			if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
			    (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
				bb_spur = cur_bb_spur;
				break;
			}
		} else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
			   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
			bb_spur = cur_bb_spur;
			break;
		}
	}

	/*
	 * NOTE(review): both branches clear the same CCK MRC mux bit; only
	 * the no-spur case returns early. Looks intentional (bit must be off
	 * either way) but worth confirming against the vendor HAL.
	 */
	if (AR_NO_SPUR == bb_spur) {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
		return;
	} else {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
	}

	bin = bb_spur * 320;

	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));

	ENABLE_REGWRITE_BUFFER(ah);

	/* Turn on spur RSSI measurement, spur filter and both bin masks. */
	newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
			AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
			AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
			AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);

	newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
		  AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
		  AR_PHY_SPUR_REG_MASK_RATE_SELECT |
		  AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
		  SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
	REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);

	/*
	 * For HT40 the self-correlator works on a sub-channel, so shift the
	 * spur by +/-10 MHz towards the relevant half.
	 */
	if (IS_CHAN_HT40(chan)) {
		if (bb_spur < 0) {
			spur_subchannel_sd = 1;
			bb_spur_off = bb_spur + 10;
		} else {
			spur_subchannel_sd = 0;
			bb_spur_off = bb_spur - 10;
		}
	} else {
		spur_subchannel_sd = 0;
		bb_spur_off = bb_spur;
	}

	if (IS_CHAN_HT40(chan))
		spur_delta_phase =
			((bb_spur * 262144) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
	else
		spur_delta_phase =
			((bb_spur * 524288) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;

	denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
	spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;

	newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
		  SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
		  SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
	REG_WRITE(ah, AR_PHY_TIMING11, newVal);

	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);

	/*
	 * Build pilot/channel masks: walk bins in 100 kHz steps from -6000
	 * and set a bit for every bin within +/-100 of the spur bin.
	 */
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;
		int chan_mask = 0;
		int bp = 0;
		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper)) {
				pilot_mask = pilot_mask | 0x1 << bp;
				chan_mask = chan_mask | 0x1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
	}

	/* Viterbi mask: bins within +/-120 of the spur, stepping down. */
	cur_vit_mask = 6100;
	upper = bin + 120;
	lower = bin - 120;

	for (i = 0; i < 123; i++) {
		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {

			/* workaround for gcc bug #37014 */
			volatile int tmp_v = abs(cur_vit_mask - bin);

			if (tmp_v < 75)
				mask_amt = 1;
			else
				mask_amt = 0;
			if (cur_vit_mask < 0)
				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
			else
				mask_p[cur_vit_mask / 100] = mask_amt;
		}
		cur_vit_mask -= 100;
	}

	/* Pack the mask arrays into the hardware registers, 2 bits per bin. */
	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
	    | (mask_m[48] << 26) | (mask_m[49] << 24)
	    | (mask_m[50] << 22) | (mask_m[51] << 20)
	    | (mask_m[52] << 18) | (mask_m[53] << 16)
	    | (mask_m[54] << 14) | (mask_m[55] << 12)
	    | (mask_m[56] << 10) | (mask_m[57] << 8)
	    | (mask_m[58] << 6) | (mask_m[59] << 4)
	    | (mask_m[60] << 2) | (mask_m[61] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

	/*
	 * NOTE(review): mask_m[48] at the "<< 14" slot below breaks the
	 * otherwise monotonic 31..45 sequence (expected mask_m[38]); same
	 * pattern exists in the vendor HAL — confirm before changing.
	 */
	tmp_mask = (mask_m[31] << 28)
	    | (mask_m[32] << 26) | (mask_m[33] << 24)
	    | (mask_m[34] << 22) | (mask_m[35] << 20)
	    | (mask_m[36] << 18) | (mask_m[37] << 16)
	    | (mask_m[48] << 14) | (mask_m[39] << 12)
	    | (mask_m[40] << 10) | (mask_m[41] << 8)
	    | (mask_m[42] << 6) | (mask_m[43] << 4)
	    | (mask_m[44] << 2) | (mask_m[45] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

	/*
	 * NOTE(review): indices 16/18/20/22/24 are each used twice and
	 * 17/19/21/23 are skipped in the 16..30 group — looks like a
	 * long-standing quirk inherited from the HAL; verify before fixing.
	 */
	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
	    | (mask_m[18] << 26) | (mask_m[18] << 24)
	    | (mask_m[20] << 22) | (mask_m[20] << 20)
	    | (mask_m[22] << 18) | (mask_m[22] << 16)
	    | (mask_m[24] << 14) | (mask_m[24] << 12)
	    | (mask_m[25] << 10) | (mask_m[26] << 8)
	    | (mask_m[27] << 6) | (mask_m[28] << 4)
	    | (mask_m[29] << 2) | (mask_m[30] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
	    | (mask_m[2] << 26) | (mask_m[3] << 24)
	    | (mask_m[4] << 22) | (mask_m[5] << 20)
	    | (mask_m[6] << 18) | (mask_m[7] << 16)
	    | (mask_m[8] << 14) | (mask_m[9] << 12)
	    | (mask_m[10] << 10) | (mask_m[11] << 8)
	    | (mask_m[12] << 6) | (mask_m[13] << 4)
	    | (mask_m[14] << 2) | (mask_m[15] << 0);
	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

	tmp_mask = (mask_p[15] << 28)
	    | (mask_p[14] << 26) | (mask_p[13] << 24)
	    | (mask_p[12] << 22) | (mask_p[11] << 20)
	    | (mask_p[10] << 18) | (mask_p[9] << 16)
	    | (mask_p[8] << 14) | (mask_p[7] << 12)
	    | (mask_p[6] << 10) | (mask_p[5] << 8)
	    | (mask_p[4] << 6) | (mask_p[3] << 4)
	    | (mask_p[2] << 2) | (mask_p[1] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

	tmp_mask = (mask_p[30] << 28)
	    | (mask_p[29] << 26) | (mask_p[28] << 24)
	    | (mask_p[27] << 22) | (mask_p[26] << 20)
	    | (mask_p[25] << 18) | (mask_p[24] << 16)
	    | (mask_p[23] << 14) | (mask_p[22] << 12)
	    | (mask_p[21] << 10) | (mask_p[20] << 8)
	    | (mask_p[19] << 6) | (mask_p[18] << 4)
	    | (mask_p[17] << 2) | (mask_p[16] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

	tmp_mask = (mask_p[45] << 28)
	    | (mask_p[44] << 26) | (mask_p[43] << 24)
	    | (mask_p[42] << 22) | (mask_p[41] << 20)
	    | (mask_p[40] << 18) | (mask_p[39] << 16)
	    | (mask_p[38] << 14) | (mask_p[37] << 12)
	    | (mask_p[36] << 10) | (mask_p[35] << 8)
	    | (mask_p[34] << 6) | (mask_p[33] << 4)
	    | (mask_p[32] << 2) | (mask_p[31] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
	    | (mask_p[59] << 26) | (mask_p[58] << 24)
	    | (mask_p[57] << 22) | (mask_p[56] << 20)
	    | (mask_p[55] << 18) | (mask_p[54] << 16)
	    | (mask_p[53] << 14) | (mask_p[52] << 12)
	    | (mask_p[51] << 10) | (mask_p[50] << 8)
	    | (mask_p[49] << 6) | (mask_p[48] << 4)
	    | (mask_p[47] << 2) | (mask_p[46] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}
420
421static void ar9002_olc_init(struct ath_hw *ah)
422{
423 u32 i;
424
425 if (!OLC_FOR_AR9280_20_LATER)
426 return;
427
428 if (OLC_FOR_AR9287_10_LATER) {
429 REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
430 AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
431 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
432 AR9287_AN_TXPC0_TXPCMODE,
433 AR9287_AN_TXPC0_TXPCMODE_S,
434 AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
435 udelay(100);
436 } else {
437 for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
438 ah->originalGain[i] =
439 MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
440 AR_PHY_TX_GAIN);
441 ah->PDADCdelta = 0;
442 }
443}
444
445static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
446 struct ath9k_channel *chan)
447{
448 u32 pll;
449
450 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
451
452 if (chan && IS_CHAN_HALF_RATE(chan))
453 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
454 else if (chan && IS_CHAN_QUARTER_RATE(chan))
455 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
456
457 if (chan && IS_CHAN_5GHZ(chan)) {
458 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
459 pll = 0x142c;
460 else if (AR_SREV_9280_20(ah))
461 pll = 0x2850;
462 else
463 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
464 } else {
465 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
466 }
467
468 return pll;
469}
470
/*
 * Read the calibrated noise-floor values from the baseband CCA
 * registers into @nfarray.
 *
 * Layout written here: [0]/[1] = control channel chain 0/1,
 * [3]/[4] = extension channel chain 0/1. Chain-1 slots are only
 * filled on parts that have a second chain (not AR9285/AR9271).
 * Indices 2 and 5 are not written by this function — presumably
 * reserved for a third chain; confirm against NUM_NF_READINGS users.
 */
static void ar9002_hw_do_getnf(struct ath_hw *ah,
			      int16_t nfarray[NUM_NF_READINGS])
{
	struct ath_common *common = ath9k_hw_common(ah);
	int16_t nf;

	nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);

	/* Sign-extend the 9-bit two's-complement hardware value. */
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ctl] [chain 0] is %d\n", nf);

	/* AR9271: clamp implausibly high readings to a sane floor. */
	if (AR_SREV_9271(ah) && (nf >= -114))
		nf = -116;

	nfarray[0] = nf;

	/* Chain 1, control channel (dual-chain parts only). */
	if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
			AR9280_PHY_CH1_MINCCA_PWR);

		if (nf & 0x100)
			nf = 0 - ((nf ^ 0x1ff) + 1);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "NF calibrated [ctl] [chain 1] is %d\n", nf);
		nfarray[1] = nf;
	}

	/* Chain 0, extension channel. */
	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ext] [chain 0] is %d\n", nf);

	if (AR_SREV_9271(ah) && (nf >= -114))
		nf = -116;

	nfarray[3] = nf;

	/* Chain 1, extension channel (dual-chain parts only). */
	if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
			AR9280_PHY_CH1_EXT_MINCCA_PWR);

		if (nf & 0x100)
			nf = 0 - ((nf ^ 0x1ff) + 1);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "NF calibrated [ext] [chain 1] is %d\n", nf);
		nfarray[4] = nf;
	}
}
522
/*
 * Install the AR9002-family PHY callbacks into the private ops table.
 * The external-RF-bank hooks are set to NULL: these single-chip parts
 * have no external analog banks to allocate or program.
 */
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->set_rf_regs = NULL;
	priv_ops->rf_alloc_ext_banks = NULL;
	priv_ops->rf_free_ext_banks = NULL;
	priv_ops->rf_set_freq = ar9002_hw_set_channel;
	priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
	priv_ops->olc_init = ar9002_olc_init;
	priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
	priv_ops->do_getnf = ar9002_hw_do_getnf;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 000000000000..81bf6e5840e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef AR9002_PHY_H
17#define AR9002_PHY_H
18
19#define AR_PHY_TEST 0x9800
20#define PHY_AGC_CLR 0x10000000
21#define RFSILENT_BB 0x00002000
22
23#define AR_PHY_TURBO 0x9804
24#define AR_PHY_FC_TURBO_MODE 0x00000001
25#define AR_PHY_FC_TURBO_SHORT 0x00000002
26#define AR_PHY_FC_DYN2040_EN 0x00000004
27#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
28#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
29/* For 25 MHz channel spacing -- not used but supported by hw */
30#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
31#define AR_PHY_FC_HT_EN 0x00000040
32#define AR_PHY_FC_SHORT_GI_40 0x00000080
33#define AR_PHY_FC_WALSH 0x00000100
34#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
35#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
36
37#define AR_PHY_TEST2 0x9808
38
39#define AR_PHY_TIMING2 0x9810
40#define AR_PHY_TIMING3 0x9814
41#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
42#define AR_PHY_TIMING3_DSC_MAN_S 17
43#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
44#define AR_PHY_TIMING3_DSC_EXP_S 13
45
46#define AR_PHY_CHIP_ID_REV_0 0x80
47#define AR_PHY_CHIP_ID_REV_1 0x81
48#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
49
50#define AR_PHY_ACTIVE 0x981C
51#define AR_PHY_ACTIVE_EN 0x00000001
52#define AR_PHY_ACTIVE_DIS 0x00000000
53
54#define AR_PHY_RF_CTL2 0x9824
55#define AR_PHY_TX_END_DATA_START 0x000000FF
56#define AR_PHY_TX_END_DATA_START_S 0
57#define AR_PHY_TX_END_PA_ON 0x0000FF00
58#define AR_PHY_TX_END_PA_ON_S 8
59
60#define AR_PHY_RF_CTL3 0x9828
61#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
62#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
63
64#define AR_PHY_ADC_CTL 0x982C
65#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
66#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
67#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
68#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
69#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
70#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
71#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
72
73#define AR_PHY_ADC_SERIAL_CTL 0x9830
74#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
75#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
76
77#define AR_PHY_RF_CTL4 0x9834
78#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
79#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
80#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
81#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
82#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
83#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
84#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
85#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
86
87#define AR_PHY_TSTDAC_CONST 0x983c
88
89#define AR_PHY_SETTLING 0x9844
90#define AR_PHY_SETTLING_SWITCH 0x00003F80
91#define AR_PHY_SETTLING_SWITCH_S 7
92
93#define AR_PHY_RXGAIN 0x9848
94#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
95#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
96#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
97#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
98#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
99#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
100#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
101#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
102
103#define AR_PHY_DESIRED_SZ 0x9850
104#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
105#define AR_PHY_DESIRED_SZ_ADC_S 0
106#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
107#define AR_PHY_DESIRED_SZ_PGA_S 8
108#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
109#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
110
111#define AR_PHY_FIND_SIG 0x9858
112#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
113#define AR_PHY_FIND_SIG_FIRSTEP_S 12
114#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
115#define AR_PHY_FIND_SIG_FIRPWR_S 18
116
117#define AR_PHY_AGC_CTL1 0x985C
118#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
119#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
120#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
121#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
122
123#define AR_PHY_CCA 0x9864
124#define AR_PHY_MINCCA_PWR 0x0FF80000
125#define AR_PHY_MINCCA_PWR_S 19
126#define AR_PHY_CCA_THRESH62 0x0007F000
127#define AR_PHY_CCA_THRESH62_S 12
128#define AR9280_PHY_MINCCA_PWR 0x1FF00000
129#define AR9280_PHY_MINCCA_PWR_S 20
130#define AR9280_PHY_CCA_THRESH62 0x000FF000
131#define AR9280_PHY_CCA_THRESH62_S 12
132
133#define AR_PHY_SFCORR_LOW 0x986C
134#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
135#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
136#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
137#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
138#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
139#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
140#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
141
142#define AR_PHY_SFCORR 0x9868
143#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
144#define AR_PHY_SFCORR_M2COUNT_THR_S 0
145#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
146#define AR_PHY_SFCORR_M1_THRESH_S 17
147#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
148#define AR_PHY_SFCORR_M2_THRESH_S 24
149
150#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
151#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
152#define AR_PHY_SYNTH_CONTROL 0x9874
153#define AR_PHY_SLEEP_SCAL 0x9878
154
155#define AR_PHY_PLL_CTL 0x987c
156#define AR_PHY_PLL_CTL_40 0xaa
157#define AR_PHY_PLL_CTL_40_5413 0x04
158#define AR_PHY_PLL_CTL_44 0xab
159#define AR_PHY_PLL_CTL_44_2133 0xeb
160#define AR_PHY_PLL_CTL_40_2133 0xea
161
162#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
163#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
164#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
165#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
166#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
167#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
168#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
169#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
170#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
171#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
172#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
173#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
174#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
175#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
176
177#define AR_PHY_RX_DELAY 0x9914
178#define AR_PHY_SEARCH_START_DELAY 0x9918
179#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
180
181#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
182#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
183#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
184#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
185#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
186#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
187#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
188#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
189#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
190
191#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
192#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
193#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
194#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
195
196#define AR_PHY_TIMING5 0x9924
197#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
198#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
199
200#define AR_PHY_POWER_TX_RATE1 0x9934
201#define AR_PHY_POWER_TX_RATE2 0x9938
202#define AR_PHY_POWER_TX_RATE_MAX 0x993c
203#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
204
205#define AR_PHY_FRAME_CTL 0x9944
206#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
207#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
208
209#define AR_PHY_TXPWRADJ 0x994C
210#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
211#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
212#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
213#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
214
215#define AR_PHY_RADAR_EXT 0x9940
216#define AR_PHY_RADAR_EXT_ENA 0x00004000
217
218#define AR_PHY_RADAR_0 0x9954
219#define AR_PHY_RADAR_0_ENA 0x00000001
220#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
221#define AR_PHY_RADAR_0_INBAND 0x0000003e
222#define AR_PHY_RADAR_0_INBAND_S 1
223#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
224#define AR_PHY_RADAR_0_PRSSI_S 6
225#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
226#define AR_PHY_RADAR_0_HEIGHT_S 12
227#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
228#define AR_PHY_RADAR_0_RRSSI_S 18
229#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
230#define AR_PHY_RADAR_0_FIRPWR_S 24
231
232#define AR_PHY_RADAR_1 0x9958
233#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
234#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
235#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
236#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
237#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
238#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
239#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
240#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
241#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
242#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
243#define AR_PHY_RADAR_1_MAXLEN_S 0
244
245#define AR_PHY_SWITCH_CHAIN_0 0x9960
246#define AR_PHY_SWITCH_COM 0x9964
247
248#define AR_PHY_SIGMA_DELTA 0x996C
249#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
250#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
251#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
252#define AR_PHY_SIGMA_DELTA_FILT2_S 3
253#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
254#define AR_PHY_SIGMA_DELTA_FILT1_S 8
255#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
256#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
257
258#define AR_PHY_RESTART 0x9970
259#define AR_PHY_RESTART_DIV_GC 0x001C0000
260#define AR_PHY_RESTART_DIV_GC_S 18
261
262#define AR_PHY_RFBUS_REQ 0x997C
263#define AR_PHY_RFBUS_REQ_EN 0x00000001
264
265#define AR_PHY_TIMING7 0x9980
266#define AR_PHY_TIMING8 0x9984
267#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
268#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
269
270#define AR_PHY_BIN_MASK2_1 0x9988
271#define AR_PHY_BIN_MASK2_2 0x998c
272#define AR_PHY_BIN_MASK2_3 0x9990
273#define AR_PHY_BIN_MASK2_4 0x9994
274
275#define AR_PHY_BIN_MASK_1 0x9900
276#define AR_PHY_BIN_MASK_2 0x9904
277#define AR_PHY_BIN_MASK_3 0x9908
278
279#define AR_PHY_MASK_CTL 0x990c
280
281#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
282#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
283
284#define AR_PHY_TIMING9 0x9998
285#define AR_PHY_TIMING10 0x999c
286#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
287#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
288
289#define AR_PHY_TIMING11 0x99a0
290#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
291#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
292#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
293#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
294
295#define AR_PHY_RX_CHAINMASK 0x99a4
296#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
297#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
298#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
299
300#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
301#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
302#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
303#define AR_PHY_9285_ANT_DIV_CTL_S 24
304#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
305#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
306#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
307#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
308#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
309#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
310#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
311#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
312#define AR_PHY_9285_ANT_DIV_LNA1 2
313#define AR_PHY_9285_ANT_DIV_LNA2 1
314#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
315#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
316#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
317#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
318
319#define AR_PHY_EXT_CCA0 0x99b8
320#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
321#define AR_PHY_EXT_CCA0_THRESH62_S 0
322
323#define AR_PHY_EXT_CCA 0x99bc
324#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
325#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
326#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
327#define AR_PHY_EXT_CCA_THRESH62_S 16
328#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
329#define AR_PHY_EXT_MINCCA_PWR_S 23
330#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
331#define AR9280_PHY_EXT_MINCCA_PWR_S 16
332
333#define AR_PHY_SFCORR_EXT 0x99c0
334#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
335#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
336#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
337#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
338#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
339#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
340#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
341#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
342#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
343
344#define AR_PHY_HALFGI 0x99D0
345#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
346#define AR_PHY_HALFGI_DSC_MAN_S 4
347#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
348#define AR_PHY_HALFGI_DSC_EXP_S 0
349
350#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
351#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
352
353#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
354
355#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
356#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
357
358#define AR_PHY_M_SLEEP 0x99f0
359#define AR_PHY_REFCLKDLY 0x99f4
360#define AR_PHY_REFCLKPD 0x99f8
361
362#define AR_PHY_CALMODE 0x99f0
363
364#define AR_PHY_CALMODE_IQ 0x00000000
365#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
366#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
367#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
368
369#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
370#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
371#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
372#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
373
374#define AR_PHY_CURRENT_RSSI 0x9c1c
375#define AR9280_PHY_CURRENT_RSSI 0x9c3c
376
377#define AR_PHY_RFBUS_GRANT 0x9C20
378#define AR_PHY_RFBUS_GRANT_EN 0x00000001
379
380#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
381#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
382
383#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
384
385#define AR_PHY_MODE 0xA200
386#define AR_PHY_MODE_ASYNCFIFO 0x80
387#define AR_PHY_MODE_AR2133 0x08
388#define AR_PHY_MODE_AR5111 0x00
389#define AR_PHY_MODE_AR5112 0x08
390#define AR_PHY_MODE_DYNAMIC 0x04
391#define AR_PHY_MODE_RF2GHZ 0x02
392#define AR_PHY_MODE_RF5GHZ 0x00
393#define AR_PHY_MODE_CCK 0x01
394#define AR_PHY_MODE_OFDM 0x00
395#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
396
397#define AR_PHY_CCK_TX_CTRL 0xA204
398#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
399#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
400#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
401
402#define AR_PHY_CCK_DETECT 0xA208
403#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
404#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
405/* [12:6] settling time for antenna switch */
406#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
407#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
408#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
409#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
410
411#define AR_PHY_GAIN_2GHZ 0xA20C
412#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
413#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
414#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
415#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
416#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
417#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
418
419#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
420#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
421#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
422#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
423#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
424#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
425#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
426#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
427
428#define AR_PHY_CCK_RXCTRL4 0xA21C
429#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
430#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
431
432#define AR_PHY_DAG_CTRLCCK 0xA228
433#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
434#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
435#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
436
437#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
438#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
439
440#define AR_PHY_POWER_TX_RATE3 0xA234
441#define AR_PHY_POWER_TX_RATE4 0xA238
442
443#define AR_PHY_SCRM_SEQ_XR 0xA23C
444#define AR_PHY_HEADER_DETECT_XR 0xA240
445#define AR_PHY_CHIRP_DETECTED_XR 0xA244
446#define AR_PHY_BLUETOOTH 0xA254
447
448#define AR_PHY_TPCRG1 0xA258
449#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
450#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
451
452#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
453#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
454#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
455#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
456#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
457#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
458
459#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
460#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
461
462#define AR_PHY_TX_PWRCTRL4 0xa264
463#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
464#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
465#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
466#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
467
468#define AR_PHY_TX_PWRCTRL6_0 0xa270
469#define AR_PHY_TX_PWRCTRL6_1 0xb270
470#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
471#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
472
473#define AR_PHY_TX_PWRCTRL7 0xa274
474#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
475#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
476
477#define AR_PHY_TX_PWRCTRL9 0xa27C
478#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
479#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
480#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
481#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
482
483#define AR_PHY_TX_GAIN_TBL1 0xa300
484#define AR_PHY_TX_GAIN 0x0007F000
485#define AR_PHY_TX_GAIN_S 12
486
487#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
488#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
489#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
490#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
491
492#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
493#define AR_PHY_MASK2_M_31_45 0xa3a4
494#define AR_PHY_MASK2_M_16_30 0xa3a8
495#define AR_PHY_MASK2_M_00_15 0xa3ac
496#define AR_PHY_MASK2_P_15_01 0xa3b8
497#define AR_PHY_MASK2_P_30_16 0xa3bc
498#define AR_PHY_MASK2_P_45_31 0xa3c0
499#define AR_PHY_MASK2_P_61_45 0xa3c4
500#define AR_PHY_SPUR_REG 0x994c
501
502#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
503#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
504
505#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
506#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
507#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
508#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
509#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
510#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
511
512#define AR_PHY_PILOT_MASK_01_30 0xa3b0
513#define AR_PHY_PILOT_MASK_31_60 0xa3b4
514
515#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
516#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
517
518#define AR_PHY_ANALOG_SWAP 0xa268
519#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
520
521#define AR_PHY_TPCRG5 0xA26C
522#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
523#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
524#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
525#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
526#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
527#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
528#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
529#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
530#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
531#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
532
533/* Carrier leak calibration control, do it after AGC calibration */
534#define AR_PHY_CL_CAL_CTL 0xA358
535#define AR_PHY_CL_CAL_ENABLE 0x00000002
536#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
537
538#define AR_PHY_POWER_TX_RATE5 0xA38C
539#define AR_PHY_POWER_TX_RATE6 0xA390
540
541#define AR_PHY_CAL_CHAINMASK 0xA39C
542
543#define AR_PHY_POWER_TX_SUB 0xA3C8
544#define AR_PHY_POWER_TX_RATE7 0xA3CC
545#define AR_PHY_POWER_TX_RATE8 0xA3D0
546#define AR_PHY_POWER_TX_RATE9 0xA3D4
547
548#define AR_PHY_XPA_CFG 0xA3D8
549#define AR_PHY_FORCE_XPA_CFG 0x000000001
550#define AR_PHY_FORCE_XPA_CFG_S 0
551
552#define AR_PHY_CH1_CCA 0xa864
553#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
554#define AR_PHY_CH1_MINCCA_PWR_S 19
555#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
556#define AR9280_PHY_CH1_MINCCA_PWR_S 20
557
558#define AR_PHY_CH2_CCA 0xb864
559#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
560#define AR_PHY_CH2_MINCCA_PWR_S 19
561
562#define AR_PHY_CH1_EXT_CCA 0xa9bc
563#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
564#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
565#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
566#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
567
568#define AR_PHY_CH2_EXT_CCA 0xb9bc
569#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
570#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
571
572#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 000000000000..5fcafb460877
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,803 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "ar9003_phy.h"
20
/*
 * Program the baseband for the calibration pass described by @currCal
 * and, where the hardware needs it, kick the cal engine off.
 *
 * IQ_MISMATCH_CAL: set the sample-count exponent, select IQ cal mode in
 * AR_PHY_CALMODE and set the DO_CAL bit to start; completion is polled
 * elsewhere via that same bit.
 * TEMP_COMP_CAL: latch the local thermometer and start a measurement;
 * no completion polling is done here.
 * The ADC cals are accepted but intentionally not implemented for this
 * chip family.
 */
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
					struct ath9k_cal_list *currCal)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/* Select calibration to run */
	switch (currCal->calData->calType) {
	case IQ_MISMATCH_CAL:
		/*
		 * Start calibration with
		 * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
		 */
		REG_RMW_FIELD(ah, AR_PHY_TIMING4,
			      AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
			      currCal->calData->calCountMax);
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "starting IQ Mismatch Calibration\n");

		/* Kick-off cal */
		REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
		break;
	case TEMP_COMP_CAL:
		REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
			      AR_PHY_65NM_CH0_THERM_LOCAL, 1);
		REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
			      AR_PHY_65NM_CH0_THERM_START, 1);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "starting Temperature Compensation Calibration\n");
		break;
	case ADC_DC_INIT_CAL:
	case ADC_GAIN_CAL:
	case ADC_DC_CAL:
		/* Not yet */
		break;
	}
}
60
61/*
62 * Generic calibration routine.
63 * Recalibrate the lower PHY chips to account for temperature/environment
64 * changes.
65 */
66static bool ar9003_hw_per_calibration(struct ath_hw *ah,
67 struct ath9k_channel *ichan,
68 u8 rxchainmask,
69 struct ath9k_cal_list *currCal)
70{
71 /* Cal is assumed not done until explicitly set below */
72 bool iscaldone = false;
73
74 /* Calibration in progress. */
75 if (currCal->calState == CAL_RUNNING) {
76 /* Check to see if it has finished. */
77 if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
78 /*
79 * Accumulate cal measures for active chains
80 */
81 currCal->calData->calCollect(ah);
82 ah->cal_samples++;
83
84 if (ah->cal_samples >=
85 currCal->calData->calNumSamples) {
86 unsigned int i, numChains = 0;
87 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
88 if (rxchainmask & (1 << i))
89 numChains++;
90 }
91
92 /*
93 * Process accumulated data
94 */
95 currCal->calData->calPostProc(ah, numChains);
96
97 /* Calibration has finished. */
98 ichan->CalValid |= currCal->calData->calType;
99 currCal->calState = CAL_DONE;
100 iscaldone = true;
101 } else {
102 /*
103 * Set-up collection of another sub-sample until we
104 * get desired number
105 */
106 ar9003_hw_setup_calibration(ah, currCal);
107 }
108 }
109 } else if (!(ichan->CalValid & currCal->calData->calType)) {
110 /* If current cal is marked invalid in channel, kick it off */
111 ath9k_hw_reset_calibration(ah, currCal);
112 }
113
114 return iscaldone;
115}
116
117static bool ar9003_hw_calibrate(struct ath_hw *ah,
118 struct ath9k_channel *chan,
119 u8 rxchainmask,
120 bool longcal)
121{
122 bool iscaldone = true;
123 struct ath9k_cal_list *currCal = ah->cal_list_curr;
124
125 /*
126 * For given calibration:
127 * 1. Call generic cal routine
128 * 2. When this cal is done (isCalDone) if we have more cals waiting
129 * (eg after reset), mask this to upper layers by not propagating
130 * isCalDone if it is set to TRUE.
131 * Instead, change isCalDone to FALSE and setup the waiting cal(s)
132 * to be run.
133 */
134 if (currCal &&
135 (currCal->calState == CAL_RUNNING ||
136 currCal->calState == CAL_WAITING)) {
137 iscaldone = ar9003_hw_per_calibration(ah, chan,
138 rxchainmask, currCal);
139 if (iscaldone) {
140 ah->cal_list_curr = currCal = currCal->calNext;
141
142 if (currCal->calState == CAL_WAITING) {
143 iscaldone = false;
144 ath9k_hw_reset_calibration(ah, currCal);
145 }
146 }
147 }
148
149 /* Do NF cal only at longer intervals */
150 if (longcal) {
151 /*
152 * Load the NF from history buffer of the current channel.
153 * NF is slow time-variant, so it is OK to use a historical
154 * value.
155 */
156 ath9k_hw_loadnf(ah, ah->curchan);
157
158 /* start NF calibration, without updating BB NF register */
159 ath9k_hw_start_nfcal(ah);
160 }
161
162 return iscaldone;
163}
164
165static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
166{
167 int i;
168
169 /* Accumulate IQ cal measures for active chains */
170 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
171 ah->totalPowerMeasI[i] +=
172 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
173 ah->totalPowerMeasQ[i] +=
174 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
175 ah->totalIqCorrMeas[i] +=
176 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
177 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
178 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
179 ah->cal_samples, i, ah->totalPowerMeasI[i],
180 ah->totalPowerMeasQ[i],
181 ah->totalIqCorrMeas[i]);
182 }
183}
184
/*
 * Post-process the accumulated IQ mismatch measurements and program the
 * per-chain Rx IQ correction coefficients.
 *
 * For each of @numChains chains this derives a 7-bit I and Q correction
 * from the I-power, Q-power and I*Q correlation totals collected by
 * ar9003_hw_iqcal_collect(), writes them into the chain's
 * AR_PHY_RX_IQCAL_CORR_B* register and finally enables IQ correction.
 */
static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 powerMeasQ, powerMeasI, iqCorrMeas;
	u32 qCoffDenom, iCoffDenom;
	int32_t qCoff, iCoff;
	int iqCorrNeg, i;
	/* Per-chain correction register addresses, indexed by chain. */
	const u_int32_t offset_array[3] = {
		AR_PHY_RX_IQCAL_CORR_B0,
		AR_PHY_RX_IQCAL_CORR_B1,
		AR_PHY_RX_IQCAL_CORR_B2,
	};

	for (i = 0; i < numChains; i++) {
		powerMeasI = ah->totalPowerMeasI[i];
		powerMeasQ = ah->totalPowerMeasQ[i];
		iqCorrMeas = ah->totalIqCorrMeas[i];

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Starting IQ Cal and Correction for Chain %d\n",
			  i);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Orignal: Chn %diq_corr_meas = 0x%08x\n",
			  i, ah->totalIqCorrMeas[i]);

		iqCorrNeg = 0;

		/*
		 * The accumulator was summed through a u32; values above
		 * 0x80000000 represent negative correlations, so recover
		 * the magnitude and remember the sign.
		 * NOTE(review): exactly 0x80000000 is not folded negative
		 * here ('>' not '>=') - confirm that edge is intended.
		 */
		if (iqCorrMeas > 0x80000000) {
			iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
			iqCorrNeg = 1;
		}

		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
		ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
			  iqCorrNeg);

		/* Scale the denominators; either can be 0 with no signal. */
		iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
		qCoffDenom = powerMeasQ / 64;

		/* Skip the chain entirely on a zero denominator. */
		if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
			iCoff = iqCorrMeas / iCoffDenom;
			qCoff = powerMeasI / qCoffDenom - 64;
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Chn %d iCoff = 0x%08x\n", i, iCoff);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Chn %d qCoff = 0x%08x\n", i, qCoff);

			/* Force bounds on iCoff */
			if (iCoff >= 63)
				iCoff = 63;
			else if (iCoff <= -63)
				iCoff = -63;

			/* Negate iCoff if iqCorrNeg == 0 */
			if (iqCorrNeg == 0x0)
				iCoff = -iCoff;

			/* Force bounds on qCoff */
			if (qCoff >= 63)
				qCoff = 63;
			else if (qCoff <= -63)
				qCoff = -63;

			/* Truncate to the registers' 7-bit fields. */
			iCoff = iCoff & 0x7f;
			qCoff = qCoff & 0x7f;

			ath_print(common, ATH_DBG_CALIBRATE,
				  "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
				  i, iCoff, qCoff);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Register offset (0x%04x) "
				  "before update = 0x%x\n",
				  offset_array[i],
				  REG_READ(ah, offset_array[i]));

			REG_RMW_FIELD(ah, offset_array[i],
				      AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
				      iCoff);
			REG_RMW_FIELD(ah, offset_array[i],
				      AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
				      qCoff);
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Register offset (0x%04x) QI COFF "
				  "(bitfields 0x%08x) after update = 0x%x\n",
				  offset_array[i],
				  AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
				  REG_READ(ah, offset_array[i]));
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Register offset (0x%04x) QQ COFF "
				  "(bitfields 0x%08x) after update = 0x%x\n",
				  offset_array[i],
				  AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
				  REG_READ(ah, offset_array[i]));

			ath_print(common, ATH_DBG_CALIBRATE,
				  "IQ Cal and Correction done for Chain %d\n",
				  i);
		}
	}

	/* Turn hardware IQ correction on (chain-0 register controls it). */
	REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
		    AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "IQ Cal and Correction (offset 0x%04x) enabled "
		  "(bit position 0x%08x). New Value 0x%08x\n",
		  (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
		  AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
		  REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
}
298
299static const struct ath9k_percal_data iq_cal_single_sample = {
300 IQ_MISMATCH_CAL,
301 MIN_CAL_SAMPLES,
302 PER_MAX_LOG_COUNT,
303 ar9003_hw_iqcal_collect,
304 ar9003_hw_iqcalibrate
305};
306
/*
 * Install the calibration descriptor and declare which periodic cals
 * this chip supports.
 *
 * NOTE(review): only IQ_MISMATCH_CAL is placed in supp_cals although
 * ar9003_hw_iscal_supported() and ar9003_hw_init_cal() also handle
 * TEMP_COMP_CAL.  If the cal-type values are disjoint bit flags, the
 * temperature-compensation cal can never be scheduled - confirm
 * whether supp_cals should be IQ_MISMATCH_CAL | TEMP_COMP_CAL.
 */
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
	ah->iq_caldata.calData = &iq_cal_single_sample;
	ah->supp_cals = IQ_MISMATCH_CAL;
}
312
313static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
314 enum ath9k_cal_types calType)
315{
316 switch (calType & ah->supp_cals) {
317 case IQ_MISMATCH_CAL:
318 /*
319 * XXX: Run IQ Mismatch for non-CCK only
320 * Note that CHANNEL_B is never set though.
321 */
322 return true;
323 case ADC_GAIN_CAL:
324 case ADC_DC_CAL:
325 return false;
326 case TEMP_COMP_CAL:
327 return true;
328 }
329
330 return false;
331}
332
333/*
334 * solve 4x4 linear equation used in loopback iq cal.
335 */
336static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
337 s32 sin_2phi_1,
338 s32 cos_2phi_1,
339 s32 sin_2phi_2,
340 s32 cos_2phi_2,
341 s32 mag_a0_d0,
342 s32 phs_a0_d0,
343 s32 mag_a1_d0,
344 s32 phs_a1_d0,
345 s32 solved_eq[])
346{
347 s32 f1 = cos_2phi_1 - cos_2phi_2,
348 f3 = sin_2phi_1 - sin_2phi_2,
349 f2;
350 s32 mag_tx, phs_tx, mag_rx, phs_rx;
351 const s32 result_shift = 1 << 15;
352 struct ath_common *common = ath9k_hw_common(ah);
353
354 f2 = (f1 * f1 + f3 * f3) / result_shift;
355
356 if (!f2) {
357 ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
358 return false;
359 }
360
361 /* mag mismatch, tx */
362 mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
363 /* phs mismatch, tx */
364 phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
365
366 mag_tx = (mag_tx / f2);
367 phs_tx = (phs_tx / f2);
368
369 /* mag mismatch, rx */
370 mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
371 result_shift;
372 /* phs mismatch, rx */
373 phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
374 result_shift;
375
376 solved_eq[0] = mag_tx;
377 solved_eq[1] = phs_tx;
378 solved_eq[2] = mag_rx;
379 solved_eq[3] = phs_rx;
380
381 return true;
382}
383
384static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
385{
386 s32 abs_i = abs(in_re),
387 abs_q = abs(in_im),
388 max_abs, min_abs;
389
390 if (abs_i > abs_q) {
391 max_abs = abs_i;
392 min_abs = abs_q;
393 } else {
394 max_abs = abs_q;
395 min_abs = abs_i;
396 }
397
398 return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
399}
400
401#define DELPT 32
402
403static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
404 s32 chain_idx,
405 const s32 iq_res[],
406 s32 iqc_coeff[])
407{
408 s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
409 i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
410 i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
411 i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
412 s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
413 phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
414 sin_2phi_1, cos_2phi_1,
415 sin_2phi_2, cos_2phi_2;
416 s32 mag_tx, phs_tx, mag_rx, phs_rx;
417 s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
418 q_q_coff, q_i_coff;
419 const s32 res_scale = 1 << 15;
420 const s32 delpt_shift = 1 << 8;
421 s32 mag1, mag2;
422 struct ath_common *common = ath9k_hw_common(ah);
423
424 i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
425 i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
426 iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
427
428 if (i2_m_q2_a0_d0 > 0x800)
429 i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
430
431 if (i2_p_q2_a0_d0 > 0x800)
432 i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
433
434 if (iq_corr_a0_d0 > 0x800)
435 iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
436
437 i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
438 i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
439 iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
440
441 if (i2_m_q2_a0_d1 > 0x800)
442 i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
443
444 if (i2_p_q2_a0_d1 > 0x800)
445 i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
446
447 if (iq_corr_a0_d1 > 0x800)
448 iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
449
450 i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
451 i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
452 iq_corr_a1_d0 = iq_res[4] & 0xfff;
453
454 if (i2_m_q2_a1_d0 > 0x800)
455 i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
456
457 if (i2_p_q2_a1_d0 > 0x800)
458 i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
459
460 if (iq_corr_a1_d0 > 0x800)
461 iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
462
463 i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
464 i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
465 iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
466
467 if (i2_m_q2_a1_d1 > 0x800)
468 i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
469
470 if (i2_p_q2_a1_d1 > 0x800)
471 i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
472
473 if (iq_corr_a1_d1 > 0x800)
474 iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
475
476 if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
477 (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
478 ath_print(common, ATH_DBG_CALIBRATE,
479 "Divide by 0:\na0_d0=%d\n"
480 "a0_d1=%d\na2_d0=%d\na1_d1=%d\n",
481 i2_p_q2_a0_d0, i2_p_q2_a0_d1,
482 i2_p_q2_a1_d0, i2_p_q2_a1_d1);
483 return false;
484 }
485
486 mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
487 phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
488
489 mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
490 phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
491
492 mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
493 phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
494
495 mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
496 phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
497
498 /* w/o analog phase shift */
499 sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
500 /* w/o analog phase shift */
501 cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
502 /* w/ analog phase shift */
503 sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
504 /* w/ analog phase shift */
505 cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
506
507 /*
508 * force sin^2 + cos^2 = 1;
509 * find magnitude by approximation
510 */
511 mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
512 mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
513
514 if ((mag1 == 0) || (mag2 == 0)) {
515 ath_print(common, ATH_DBG_CALIBRATE,
516 "Divide by 0: mag1=%d, mag2=%d\n",
517 mag1, mag2);
518 return false;
519 }
520
521 /* normalization sin and cos by mag */
522 sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
523 cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
524 sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
525 cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
526
527 /* calculate IQ mismatch */
528 if (!ar9003_hw_solve_iq_cal(ah,
529 sin_2phi_1, cos_2phi_1,
530 sin_2phi_2, cos_2phi_2,
531 mag_a0_d0, phs_a0_d0,
532 mag_a1_d0,
533 phs_a1_d0, solved_eq)) {
534 ath_print(common, ATH_DBG_CALIBRATE,
535 "Call to ar9003_hw_solve_iq_cal() failed.\n");
536 return false;
537 }
538
539 mag_tx = solved_eq[0];
540 phs_tx = solved_eq[1];
541 mag_rx = solved_eq[2];
542 phs_rx = solved_eq[3];
543
544 ath_print(common, ATH_DBG_CALIBRATE,
545 "chain %d: mag mismatch=%d phase mismatch=%d\n",
546 chain_idx, mag_tx/res_scale, phs_tx/res_scale);
547
548 if (res_scale == mag_tx) {
549 ath_print(common, ATH_DBG_CALIBRATE,
550 "Divide by 0: mag_tx=%d, res_scale=%d\n",
551 mag_tx, res_scale);
552 return false;
553 }
554
555 /* calculate and quantize Tx IQ correction factor */
556 mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
557 phs_corr_tx = -phs_tx;
558
559 q_q_coff = (mag_corr_tx * 128 / res_scale);
560 q_i_coff = (phs_corr_tx * 256 / res_scale);
561
562 ath_print(common, ATH_DBG_CALIBRATE,
563 "tx chain %d: mag corr=%d phase corr=%d\n",
564 chain_idx, q_q_coff, q_i_coff);
565
566 if (q_i_coff < -63)
567 q_i_coff = -63;
568 if (q_i_coff > 63)
569 q_i_coff = 63;
570 if (q_q_coff < -63)
571 q_q_coff = -63;
572 if (q_q_coff > 63)
573 q_q_coff = 63;
574
575 iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
576
577 ath_print(common, ATH_DBG_CALIBRATE,
578 "tx chain %d: iq corr coeff=%x\n",
579 chain_idx, iqc_coeff[0]);
580
581 if (-mag_rx == res_scale) {
582 ath_print(common, ATH_DBG_CALIBRATE,
583 "Divide by 0: mag_rx=%d, res_scale=%d\n",
584 mag_rx, res_scale);
585 return false;
586 }
587
588 /* calculate and quantize Rx IQ correction factors */
589 mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
590 phs_corr_rx = -phs_rx;
591
592 q_q_coff = (mag_corr_rx * 128 / res_scale);
593 q_i_coff = (phs_corr_rx * 256 / res_scale);
594
595 ath_print(common, ATH_DBG_CALIBRATE,
596 "rx chain %d: mag corr=%d phase corr=%d\n",
597 chain_idx, q_q_coff, q_i_coff);
598
599 if (q_i_coff < -63)
600 q_i_coff = -63;
601 if (q_i_coff > 63)
602 q_i_coff = 63;
603 if (q_q_coff < -63)
604 q_q_coff = -63;
605 if (q_q_coff > 63)
606 q_q_coff = 63;
607
608 iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
609
610 ath_print(common, ATH_DBG_CALIBRATE,
611 "rx chain %d: iq corr coeff=%x\n",
612 chain_idx, iqc_coeff[1]);
613
614 return true;
615}
616
/*
 * Run the hardware Tx IQ calibration and program the resulting
 * correction coefficients for every enabled Tx chain.
 *
 * Sequence: program the DELPT gain step, set DO_CAL, wait for the
 * hardware to clear it, then for each chain read six measurement words
 * from channel-info memory, compute the coefficients with
 * ar9003_hw_calc_iq_corr() and write them to the Tx/Rx correction
 * registers.  Any failure logs and bails out without enabling
 * correction.
 */
static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	/* Per-chain register addresses, indexed by chain number. */
	const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
		AR_PHY_TX_IQCAL_STATUS_B0,
		AR_PHY_TX_IQCAL_STATUS_B1,
		AR_PHY_TX_IQCAL_STATUS_B2,
	};
	const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
	};
	const u32 rx_corr[AR9300_MAX_CHAINS] = {
		AR_PHY_RX_IQCAL_CORR_B0,
		AR_PHY_RX_IQCAL_CORR_B1,
		AR_PHY_RX_IQCAL_CORR_B2,
	};
	const u_int32_t chan_info_tab[] = {
		AR_PHY_CHAN_INFO_TAB_0,
		AR_PHY_CHAN_INFO_TAB_1,
		AR_PHY_CHAN_INFO_TAB_2,
	};
	s32 iq_res[6];
	s32 iqc_coeff[2];
	s32 i, j;
	u32 num_chains = 0;

	/* Count chains enabled in the Tx chainmask. */
	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (ah->txchainmask & (1 << i))
			num_chains++;
	}

	/* Program the measurement gain step, then start the cal. */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
		      AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
		      DELPT);
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
		      AR_PHY_TX_IQCAL_START_DO_CAL,
		      AR_PHY_TX_IQCAL_START_DO_CAL);

	/* Hardware clears DO_CAL when the calibration completes. */
	if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
			   AR_PHY_TX_IQCAL_START_DO_CAL,
			   0, AH_WAIT_TIMEOUT)) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Tx IQ Cal not complete.\n");
		goto TX_IQ_CAL_FAILED;
	}

	for (i = 0; i < num_chains; i++) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Doing Tx IQ Cal for chain %d.\n", i);

		if (REG_READ(ah, txiqcal_status[i]) &
		    AR_PHY_TX_IQCAL_STATUS_FAILED) {
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Tx IQ Cal failed for chain %d.\n", i);
			goto TX_IQ_CAL_FAILED;
		}

		/*
		 * Each of the three table words yields a 32-bit and a
		 * 16-bit half, selected via the S2_READ toggle.
		 */
		for (j = 0; j < 3; j++) {
			u_int8_t idx = 2 * j,
			offset = 4 * j;

			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
				      AR_PHY_CHAN_INFO_TAB_S2_READ, 0);

			/* 32 bits */
			iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);

			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
				      AR_PHY_CHAN_INFO_TAB_S2_READ, 1);

			/* 16 bits */
			iq_res[idx+1] = 0xffff & REG_READ(ah,
							  chan_info_tab[i] +
							  offset);

			ath_print(common, ATH_DBG_CALIBRATE,
				  "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
				  idx, iq_res[idx], idx+1, iq_res[idx+1]);
		}

		if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Failed in calculation of IQ correction.\n");
			goto TX_IQ_CAL_FAILED;
		}

		ath_print(common, ATH_DBG_CALIBRATE,
			  "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
			  iqc_coeff[0], iqc_coeff[1]);

		/* Tx coefficient, then the split Rx Q/I 7-bit halves. */
		REG_RMW_FIELD(ah, tx_corr_coeff[i],
			      AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
			      iqc_coeff[0]);
		REG_RMW_FIELD(ah, rx_corr[i],
			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
			      iqc_coeff[1] >> 7);
		REG_RMW_FIELD(ah, rx_corr[i],
			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
			      iqc_coeff[1]);
	}

	/* All chains done: enable Tx and Rx-loopback IQ correction. */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
		      AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);

	return;

TX_IQ_CAL_FAILED:
	ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
	return;
}
731
/*
 * One-time calibration run at reset/channel change: AGC offset cal,
 * optional Tx IQ cal, then (re)build the periodic calibration list.
 *
 * Returns false if the AGC offset calibration fails to complete within
 * the poll timeout; true otherwise.
 */
static bool ar9003_hw_init_cal(struct ath_hw *ah,
			       struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/*
	 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain mode before
	 * running AGC/TxIQ cals
	 */
	ar9003_hw_set_chain_masks(ah, 0x7, 0x7);

	/* Calibrate the AGC */
	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
		  REG_READ(ah, AR_PHY_AGC_CONTROL) |
		  AR_PHY_AGC_CONTROL_CAL);

	/* Poll for offset calibration complete */
	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
			   0, AH_WAIT_TIMEOUT)) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "offset calibration failed to "
			  "complete in 1ms; noisy environment?\n");
		return false;
	}

	/* Do Tx IQ Calibration */
	if (ah->config.tx_iq_calibration)
		ar9003_hw_tx_iq_cal(ah);

	/* Revert chainmasks to their original values before NF cal */
	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

	/* Initialize list pointers */
	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

	if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
		INIT_CAL(&ah->iq_caldata);
		INSERT_CAL(ah, &ah->iq_caldata);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "enabling IQ Calibration.\n");
	}

	/*
	 * NOTE(review): ar9003_hw_init_cal_settings() puts only
	 * IQ_MISMATCH_CAL in supp_cals; if the cal-type values are
	 * disjoint flags this branch can never be taken - confirm
	 * whether TEMP_COMP_CAL should also be in supp_cals.
	 */
	if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
		INIT_CAL(&ah->tempCompCalData);
		INSERT_CAL(ah, &ah->tempCompCalData);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "enabling Temperature Compensation Calibration.\n");
	}

	/* Initialize current pointer to first element in list */
	ah->cal_list_curr = ah->cal_list;

	if (ah->cal_list_curr)
		ath9k_hw_reset_calibration(ah, ah->cal_list_curr);

	chan->CalValid = 0;

	return true;
}
791
/*
 * Hook the AR9003 calibration implementations into the hw ops tables.
 * Called once at attach time: the private ops are used internally by
 * the hw layer, while ops->calibrate is the driver-facing periodic
 * calibration entry point.
 */
void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
	priv_ops->init_cal = ar9003_hw_init_cal;
	priv_ops->setup_calibration = ar9003_hw_setup_calibration;
	priv_ops->iscal_supported = ar9003_hw_iscal_supported;

	ops->calibrate = ar9003_hw_calibrate;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 000000000000..8a79550dff71
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1860 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_phy.h"
19#include "ar9003_eeprom.h"
20
21#define COMP_HDR_LEN 4
22#define COMP_CKSUM_LEN 2
23
24#define AR_CH0_TOP (0x00016288)
25#define AR_CH0_TOP_XPABIASLVL (0x3)
26#define AR_CH0_TOP_XPABIASLVL_S (8)
27
28#define AR_CH0_THERM (0x00016290)
29#define AR_CH0_THERM_SPARE (0x3f)
30#define AR_CH0_THERM_SPARE_S (0)
31
32#define AR_SWITCH_TABLE_COM_ALL (0xffff)
33#define AR_SWITCH_TABLE_COM_ALL_S (0)
34
35#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
36#define AR_SWITCH_TABLE_COM2_ALL_S (0)
37
38#define AR_SWITCH_TABLE_ALL (0xfff)
39#define AR_SWITCH_TABLE_ALL_S (0)
40
/*
 * Built-in default/template EEPROM image.  The restore code copies it
 * into memory before reading the on-chip data, and it also serves as
 * the reference image (reference == 2) for block-compressed eeprom
 * content.  Values are in the little-endian on-eeprom layout.
 */
static const struct ar9300_eeprom ar9300_default = {
	.eepromVersion = 2,
	.templateVersion = 2,
	.macAddr = {1, 2, 3, 4, 5, 6},	/* placeholder MAC */
	.custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	.baseEepHeader = {
		.regDmn = {0, 0x1f},
		.txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
		.opCapFlags = {
			.opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
			.eepMisc = 0,
		},
		.rfSilent = 0,
		.blueToothOptions = 0,
		.deviceCap = 0,
		.deviceType = 5, /* takes lower byte in eeprom location */
		.pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
		.params_for_tuning_caps = {0, 0},
		.featureEnable = 0x0c,
		/*
		 * bit0 - enable tx temp comp - disabled
		 * bit1 - enable tx volt comp - disabled
		 * bit2 - enable fastClock - enabled
		 * bit3 - enable doubling - enabled
		 * bit4 - enable internal regulator - disabled
		 */
		.miscConfiguration = 0, /* bit0 - turn down drivestrength */
		.eepromWriteEnableGpio = 3,
		.wlanDisableGpio = 0,
		.wlanLedGpio = 8,
		.rxBandSelectGpio = 0xff,
		.txrxgain = 0,
		.swreg = 0,
	},
	.modalHeader2G = {
		/* ar9300_modal_eep_header  2g */
		/* 4 idle,t1,t2,b(4 bits per setting) */
		.antCtrlCommon = 0x110,
		/* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
		.antCtrlCommon2 = 0x22222,

		/*
		 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
		 * rx1, rx12, b (2 bits each)
		 */
		.antCtrlChain = {0x150, 0x150, 0x150},

		/*
		 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
		 * for ar9280 (0xa20c/b20c 5:0)
		 */
		.xatten1DB = {0, 0, 0},

		/*
		 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
		 * for ar9280 (0xa20c/b20c 16:12
		 */
		.xatten1Margin = {0, 0, 0},
		.tempSlope = 36,
		.voltSlope = 0,

		/*
		 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
		 * channels in usual fbin coding format
		 */
		.spurChans = {0, 0, 0, 0, 0},

		/*
		 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
		 * if the register is per chain
		 */
		.noiseFloorThreshCh = {-1, 0, 0},
		.ob = {1, 1, 1},/* 3 chain */
		.db_stage2 = {1, 1, 1}, /* 3 chain  */
		.db_stage3 = {0, 0, 0},
		.db_stage4 = {0, 0, 0},
		.xpaBiasLvl = 0,
		.txFrameToDataStart = 0x0e,
		.txFrameToPaOn = 0x0e,
		.txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
		.antennaGain = 0,
		.switchSettling = 0x2c,
		.adcDesiredSize = -30,
		.txEndToXpaOff = 0,
		.txEndToRxOn = 0x2,
		.txFrameToXpaOn = 0xe,
		.thresh62 = 28,
		.futureModal = { /* [32] */
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
		},
	},
	.calFreqPier2G = {
		FREQ2FBIN(2412, 1),
		FREQ2FBIN(2437, 1),
		FREQ2FBIN(2472, 1),
	},
	/* ar9300_cal_data_per_freq_op_loop 2g */
	.calPierData2G = {
		{ {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
		{ {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
		{ {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
	},
	.calTarget_freqbin_Cck = {
		FREQ2FBIN(2412, 1),
		FREQ2FBIN(2484, 1),
	},
	.calTarget_freqbin_2G = {
		FREQ2FBIN(2412, 1),
		FREQ2FBIN(2437, 1),
		FREQ2FBIN(2472, 1)
	},
	.calTarget_freqbin_2GHT20 = {
		FREQ2FBIN(2412, 1),
		FREQ2FBIN(2437, 1),
		FREQ2FBIN(2472, 1)
	},
	.calTarget_freqbin_2GHT40 = {
		FREQ2FBIN(2412, 1),
		FREQ2FBIN(2437, 1),
		FREQ2FBIN(2472, 1)
	},
	.calTargetPowerCck = {
		/* 1L-5L,5S,11L,11S */
		{ {36, 36, 36, 36} },
		{ {36, 36, 36, 36} },
	},
	.calTargetPower2G = {
		/* 6-24,36,48,54 */
		{ {32, 32, 28, 24} },
		{ {32, 32, 28, 24} },
		{ {32, 32, 28, 24} },
	},
	.calTargetPower2GHT20 = {
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
	},
	.calTargetPower2GHT40 = {
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
		{ {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
	},
	.ctlIndex_2G =  {
		0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
		0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
	},
	.ctl_freqbin_2G = {
		{
			FREQ2FBIN(2412, 1),
			FREQ2FBIN(2417, 1),
			FREQ2FBIN(2457, 1),
			FREQ2FBIN(2462, 1)
		},
		{
			FREQ2FBIN(2412, 1),
			FREQ2FBIN(2417, 1),
			FREQ2FBIN(2462, 1),
			0xFF,
		},

		{
			FREQ2FBIN(2412, 1),
			FREQ2FBIN(2417, 1),
			FREQ2FBIN(2462, 1),
			0xFF,
		},
		{
			FREQ2FBIN(2422, 1),
			FREQ2FBIN(2427, 1),
			FREQ2FBIN(2447, 1),
			FREQ2FBIN(2452, 1)
		},

		{
			/* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			/* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
			/* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
		},

		{
			/* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			/* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
			0,
		},

		{
			/* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			FREQ2FBIN(2472, 1),
			0,
		},

		{
			/* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
			/* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
			/* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
			/* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
		},

		{
			/* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			/* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
		},

		{
			/* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			/* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
			0
		},

		{
			/* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
			/* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
			/* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
			0
		},

		{
			/* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
			/* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
			/* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
			/* Data[11].ctlEdges[3].bChannel */
			FREQ2FBIN(2462, 1),
		}
	},
	/* each pair is {tPower, flag} per ctl edge */
	.ctlPowerData_2G = {
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
		{ { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },

		{ { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },

		{ { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },

		{ { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
		{ { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
	},
	.modalHeader5G = {
		/* 4 idle,t1,t2,b (4 bits per setting) */
		.antCtrlCommon = 0x110,
		/* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
		.antCtrlCommon2 = 0x22222,
		/* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
		.antCtrlChain = {
			0x000, 0x000, 0x000,
		},
		/* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
		.xatten1DB = {0, 0, 0},

		/*
		 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
		 * for merlin (0xa20c/b20c 16:12
		 */
		.xatten1Margin = {0, 0, 0},
		.tempSlope = 68,
		.voltSlope = 0,
		/* spurChans spur channels in usual fbin coding format */
		.spurChans = {0, 0, 0, 0, 0},
		/* noiseFloorThreshCh Check if the register is per chain */
		.noiseFloorThreshCh = {-1, 0, 0},
		.ob = {3, 3, 3}, /* 3 chain */
		.db_stage2 = {3, 3, 3}, /* 3 chain */
		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
		.db_stage4 = {3, 3, 3},	 /* don't exist for 2G */
		.xpaBiasLvl = 0,
		.txFrameToDataStart = 0x0e,
		.txFrameToPaOn = 0x0e,
		.txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
		.antennaGain = 0,
		.switchSettling = 0x2d,
		.adcDesiredSize = -30,
		.txEndToXpaOff = 0,
		.txEndToRxOn = 0x2,
		.txFrameToXpaOn = 0xe,
		.thresh62 = 28,
		.futureModal = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
		},
	},
	.calFreqPier5G = {
		FREQ2FBIN(5180, 0),
		FREQ2FBIN(5220, 0),
		FREQ2FBIN(5320, 0),
		FREQ2FBIN(5400, 0),
		FREQ2FBIN(5500, 0),
		FREQ2FBIN(5600, 0),
		FREQ2FBIN(5725, 0),
		FREQ2FBIN(5825, 0)
	},
	.calPierData5G = {
		{
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
		},
		{
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
		},
		{
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
		},

	},
	.calTarget_freqbin_5G = {
		FREQ2FBIN(5180, 0),
		FREQ2FBIN(5220, 0),
		FREQ2FBIN(5320, 0),
		FREQ2FBIN(5400, 0),
		FREQ2FBIN(5500, 0),
		FREQ2FBIN(5600, 0),
		FREQ2FBIN(5725, 0),
		FREQ2FBIN(5825, 0)
	},
	.calTarget_freqbin_5GHT20 = {
		FREQ2FBIN(5180, 0),
		FREQ2FBIN(5240, 0),
		FREQ2FBIN(5320, 0),
		FREQ2FBIN(5500, 0),
		FREQ2FBIN(5700, 0),
		FREQ2FBIN(5745, 0),
		FREQ2FBIN(5725, 0),
		FREQ2FBIN(5825, 0)
	},
	.calTarget_freqbin_5GHT40 = {
		FREQ2FBIN(5180, 0),
		FREQ2FBIN(5240, 0),
		FREQ2FBIN(5320, 0),
		FREQ2FBIN(5500, 0),
		FREQ2FBIN(5700, 0),
		FREQ2FBIN(5745, 0),
		FREQ2FBIN(5725, 0),
		FREQ2FBIN(5825, 0)
	},
	.calTargetPower5G = {
		/* 6-24,36,48,54 */
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
		{ {20, 20, 20, 10} },
	},
	.calTargetPower5GHT20 = {
		/*
		 * 0_8_16,1-3_9-11_17-19,
		 * 4,5,6,7,12,13,14,15,20,21,22,23
		 */
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
	},
	.calTargetPower5GHT40 =  {
		/*
		 * 0_8_16,1-3_9-11_17-19,
		 * 4,5,6,7,12,13,14,15,20,21,22,23
		 */
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
		{ {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
	},
	.ctlIndex_5G =  {
		0x10, 0x16, 0x18, 0x40, 0x46,
		0x48, 0x30, 0x36, 0x38
	},
	.ctl_freqbin_5G =  {
		{
			/* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
			/* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
			/* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
			/* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
		},
		{
			/* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
			/* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
			/* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
			/* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
		},

		{
			/* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
			/* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
			/* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
			/* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
			/* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
			/* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
			/* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
			/* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
		},

		{
			/* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
			/* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
			/* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[3].ctlEdges[6].bChannel */ 0xFF,
			/* Data[3].ctlEdges[7].bChannel */ 0xFF,
		},

		{
			/* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[4].ctlEdges[4].bChannel */ 0xFF,
			/* Data[4].ctlEdges[5].bChannel */ 0xFF,
			/* Data[4].ctlEdges[6].bChannel */ 0xFF,
			/* Data[4].ctlEdges[7].bChannel */ 0xFF,
		},

		{
			/* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
			/* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
			/* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
			/* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
			/* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
			/* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
			/* Data[5].ctlEdges[6].bChannel */ 0xFF,
			/* Data[5].ctlEdges[7].bChannel */ 0xFF
		},

		{
			/* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
			/* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
			/* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
			/* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
		},

		{
			/* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
			/* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
			/* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
			/* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
			/* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
			/* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
			/* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
			/* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
		},

		{
			/* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
			/* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
			/* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
			/* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
			/* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
			/* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
			/* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
			/* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
		}
	},
	.ctlPowerData_5G = {
		{
			{
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
				{60, 1}, {60, 1}, {60, 1}, {60, 0},
			}
		},
		{
			{
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
				{60, 1}, {60, 1}, {60, 1}, {60, 0},
			}
		},
		{
			{
				{60, 0}, {60, 1}, {60, 0}, {60, 1},
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
			}
		},
		{
			{
				{60, 0}, {60, 1}, {60, 1}, {60, 0},
				{60, 1}, {60, 0}, {60, 0}, {60, 0},
			}
		},
		{
			{
				{60, 1}, {60, 1}, {60, 1}, {60, 0},
				{60, 0}, {60, 0}, {60, 0}, {60, 0},
			}
		},
		{
			{
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
				{60, 1}, {60, 0}, {60, 0}, {60, 0},
			}
		},
		{
			{
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
				{60, 1}, {60, 1}, {60, 1}, {60, 1},
			}
		},
		{
			{
				{60, 1}, {60, 1}, {60, 0}, {60, 1},
				{60, 1}, {60, 1}, {60, 1}, {60, 0},
			}
		},
		{
			{
				{60, 1}, {60, 0}, {60, 1}, {60, 1},
				{60, 1}, {60, 1}, {60, 0}, {60, 1},
			}
		},
	}
};
603
/*
 * Eeprom validity hook for AR9300.  Currently a stub that always
 * reports success; per-block checksum validation happens during
 * ar9300_eeprom_restore_internal() instead.
 */
static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
{
	return 0;
}
608
/*
 * Return a scalar configuration value from the in-memory AR9300
 * eeprom image.  Unknown/unsupported parameters read as 0.
 */
static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
				      enum eeprom_param param)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;

	switch (param) {
	case EEP_MAC_LSW:
		return eep->macAddr[0] << 8 | eep->macAddr[1];
	case EEP_MAC_MID:
		return eep->macAddr[2] << 8 | eep->macAddr[3];
	case EEP_MAC_MSW:
		return eep->macAddr[4] << 8 | eep->macAddr[5];
	case EEP_REG_0:
		return pBase->regDmn[0];
	case EEP_REG_1:
		return pBase->regDmn[1];
	case EEP_OP_CAP:
		return pBase->deviceCap;
	case EEP_OP_MODE:
		return pBase->opCapFlags.opFlags;
	case EEP_RF_SILENT:
		return pBase->rfSilent;
	case EEP_TX_MASK:
		/* high nibble of txrxMask holds the tx chainmask */
		return (pBase->txrxMask >> 4) & 0xf;
	case EEP_RX_MASK:
		/* low nibble holds the rx chainmask */
		return pBase->txrxMask & 0xf;
	case EEP_DRIVE_STRENGTH:
#define AR9300_EEP_BASE_DRIV_STRENGTH	0x1
		return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
	case EEP_INTERNAL_REGULATOR:
		/* Bit 4 is internal regulator flag */
		return (pBase->featureEnable & 0x10) >> 4;
	case EEP_SWREG:
		return pBase->swreg;
	default:
		return 0;
	}
}
648
#ifdef __BIG_ENDIAN
/*
 * The eeprom image is stored little-endian; on big-endian hosts the
 * multi-byte fields must be byte-swapped in place.
 */
static void ar9300_swap_eeprom(struct ar9300_eeprom *eep)
{
	int chain;

	eep->baseEepHeader.regDmn[0] = swab16(eep->baseEepHeader.regDmn[0]);
	eep->baseEepHeader.regDmn[1] = swab16(eep->baseEepHeader.regDmn[1]);
	eep->baseEepHeader.swreg = swab32(eep->baseEepHeader.swreg);

	eep->modalHeader2G.antCtrlCommon =
		swab32(eep->modalHeader2G.antCtrlCommon);
	eep->modalHeader2G.antCtrlCommon2 =
		swab32(eep->modalHeader2G.antCtrlCommon2);
	eep->modalHeader5G.antCtrlCommon =
		swab32(eep->modalHeader5G.antCtrlCommon);
	eep->modalHeader5G.antCtrlCommon2 =
		swab32(eep->modalHeader5G.antCtrlCommon2);

	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
		eep->modalHeader2G.antCtrlChain[chain] =
			swab16(eep->modalHeader2G.antCtrlChain[chain]);
		eep->modalHeader5G.antCtrlChain[chain] =
			swab16(eep->modalHeader5G.antCtrlChain[chain]);
	}
}
#endif
686
/*
 * Fetch 'many' consecutive bytes starting at 'address' into 'buffer'.
 * The nvram layer reads 16-bit little-endian words, so each requested
 * byte is extracted from the word containing it.
 *
 * NOTE(review): the bounds check rejects address + many > SIZE - 1,
 * which makes the very last eeprom byte unreadable -- looks off by
 * one; confirm against AR9300_EEPROM_SIZE semantics.
 */
static bool ar9300_hw_read_eeprom(struct ath_hw *ah,
				  long address, u8 *buffer, int many)
{
	int i;
	u8 value[2];
	unsigned long eepAddr;
	unsigned long byteAddr;
	u16 *svalue;
	struct ath_common *common = ath9k_hw_common(ah);

	if ((address < 0) || ((address + many) > AR9300_EEPROM_SIZE - 1)) {
		ath_print(common, ATH_DBG_EEPROM,
			  "eeprom address not in range\n");
		return false;
	}

	for (i = 0; i < many; i++) {
		eepAddr = (u16) (address + i) / 2;	/* 16-bit word index */
		byteAddr = (u16) (address + i) % 2;	/* byte within word */
		svalue = (u16 *) value;
		if (!ath9k_hw_nvram_read(common, eepAddr, svalue)) {
			ath_print(common, ATH_DBG_EEPROM,
				  "unable to read eeprom region\n");
			return false;
		}
		*svalue = le16_to_cpu(*svalue);
		buffer[i] = value[byteAddr];
	}

	return true;
}
718
719static bool ar9300_read_eeprom(struct ath_hw *ah,
720 int address, u8 *buffer, int many)
721{
722 int it;
723
724 for (it = 0; it < many; it++)
725 if (!ar9300_hw_read_eeprom(ah,
726 (address - it),
727 (buffer + it), 1))
728 return false;
729 return true;
730}
731
732static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
733 int *length, int *major, int *minor)
734{
735 unsigned long value[4];
736
737 value[0] = best[0];
738 value[1] = best[1];
739 value[2] = best[2];
740 value[3] = best[3];
741 *code = ((value[0] >> 5) & 0x0007);
742 *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
743 *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
744 *major = (value[2] & 0x000f);
745 *minor = (value[3] & 0x00ff);
746}
747
748static u16 ar9300_comp_cksum(u8 *data, int dsize)
749{
750 int it, checksum = 0;
751
752 for (it = 0; it < dsize; it++) {
753 checksum += data[it];
754 checksum &= 0xffff;
755 }
756
757 return checksum;
758}
759
/*
 * Apply one block-compressed eeprom section to the memory image.
 * The block is a sequence of (offset, length, payload...) records:
 * 'offset' advances the write position relative to the end of the
 * previous record, then 'length' payload bytes are copied there.
 * A record that would write outside mptr[0..mdataSize) aborts the
 * whole restore.
 */
static bool ar9300_uncompress_block(struct ath_hw *ah,
				    u8 *mptr,
				    int mdataSize,
				    u8 *block,
				    int size)
{
	int it;
	int spot;	/* current write position within mptr */
	int offset;
	int length;
	struct ath_common *common = ath9k_hw_common(ah);

	spot = 0;

	for (it = 0; it < size; it += (length+2)) {
		offset = block[it];
		offset &= 0xff;
		spot += offset;
		length = block[it+1];
		length &= 0xff;

		if (length > 0 && spot >= 0 && spot+length < mdataSize) {
			ath_print(common, ATH_DBG_EEPROM,
				  "Restore at %d: spot=%d "
				  "offset=%d length=%d\n",
				  it, spot, offset, length);
			memcpy(&mptr[spot], &block[it+2], length);
			spot += length;
		} else if (length > 0) {
			/* out-of-range record: image is corrupt */
			ath_print(common, ATH_DBG_EEPROM,
				  "Bad restore at %d: spot=%d "
				  "offset=%d length=%d\n",
				  it, spot, offset, length);
			return false;
		}
	}
	return true;
}
798
799static int ar9300_compress_decision(struct ath_hw *ah,
800 int it,
801 int code,
802 int reference,
803 u8 *mptr,
804 u8 *word, int length, int mdata_size)
805{
806 struct ath_common *common = ath9k_hw_common(ah);
807 u8 *dptr;
808
809 switch (code) {
810 case _CompressNone:
811 if (length != mdata_size) {
812 ath_print(common, ATH_DBG_EEPROM,
813 "EEPROM structure size mismatch"
814 "memory=%d eeprom=%d\n", mdata_size, length);
815 return -1;
816 }
817 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
818 ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
819 " uncompressed, length %d\n", it, length);
820 break;
821 case _CompressBlock:
822 if (reference == 0) {
823 dptr = mptr;
824 } else {
825 if (reference != 2) {
826 ath_print(common, ATH_DBG_EEPROM,
827 "cant find reference eeprom"
828 "struct %d\n", reference);
829 return -1;
830 }
831 memcpy(mptr, &ar9300_default, mdata_size);
832 }
833 ath_print(common, ATH_DBG_EEPROM,
834 "restore eeprom %d: block, reference %d,"
835 " length %d\n", it, reference, length);
836 ar9300_uncompress_block(ah, mptr, mdata_size,
837 (u8 *) (word + COMP_HDR_LEN), length);
838 break;
839 default:
840 ath_print(common, ATH_DBG_EEPROM, "unknown compression"
841 " code %d\n", code);
842 return -1;
843 }
844 return 0;
845}
846
847/*
848 * Read the configuration data from the eeprom.
849 * The data can be put in any specified memory buffer.
850 *
851 * Returns -1 on error.
852 * Returns address of next memory location on success.
853 */
854static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
855 u8 *mptr, int mdata_size)
856{
857#define MDEFAULT 15
858#define MSTATE 100
859 int cptr;
860 u8 *word;
861 int code;
862 int reference, length, major, minor;
863 int osize;
864 int it;
865 u16 checksum, mchecksum;
866 struct ath_common *common = ath9k_hw_common(ah);
867
868 word = kzalloc(2048, GFP_KERNEL);
869 if (!word)
870 return -1;
871
872 memcpy(mptr, &ar9300_default, mdata_size);
873
874 cptr = AR9300_BASE_ADDR;
875 for (it = 0; it < MSTATE; it++) {
876 if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
877 goto fail;
878
879 if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
880 word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
881 && word[2] == 0xff && word[3] == 0xff))
882 break;
883
884 ar9300_comp_hdr_unpack(word, &code, &reference,
885 &length, &major, &minor);
886 ath_print(common, ATH_DBG_EEPROM,
887 "Found block at %x: code=%d ref=%d"
888 "length=%d major=%d minor=%d\n", cptr, code,
889 reference, length, major, minor);
890 if (length >= 1024) {
891 ath_print(common, ATH_DBG_EEPROM,
892 "Skipping bad header\n");
893 cptr -= COMP_HDR_LEN;
894 continue;
895 }
896
897 osize = length;
898 ar9300_read_eeprom(ah, cptr, word,
899 COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
900 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
901 mchecksum = word[COMP_HDR_LEN + osize] |
902 (word[COMP_HDR_LEN + osize + 1] << 8);
903 ath_print(common, ATH_DBG_EEPROM,
904 "checksum %x %x\n", checksum, mchecksum);
905 if (checksum == mchecksum) {
906 ar9300_compress_decision(ah, it, code, reference, mptr,
907 word, length, mdata_size);
908 } else {
909 ath_print(common, ATH_DBG_EEPROM,
910 "skipping block with bad checksum\n");
911 }
912 cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
913 }
914
915 kfree(word);
916 return cptr;
917
918fail:
919 kfree(word);
920 return -1;
921}
922
923/*
924 * Restore the configuration structure by reading the eeprom.
925 * This function destroys any existing in-memory structure
926 * content.
927 */
928static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
929{
930 u8 *mptr = NULL;
931 int mdata_size;
932
933 mptr = (u8 *) &ah->eeprom.ar9300_eep;
934 mdata_size = sizeof(struct ar9300_eeprom);
935
936 if (mptr && mdata_size > 0) {
937 /* At this point, mptr points to the eeprom data structure
938 * in it's "default" state. If this is big endian, swap the
939 * data structures back to "little endian"
940 */
941 /* First swap, default to Little Endian */
942#ifdef __BIG_ENDIAN
943 ar9300_swap_eeprom((struct ar9300_eeprom *)mptr);
944#endif
945 if (ar9300_eeprom_restore_internal(ah, mptr, mdata_size) >= 0)
946 return true;
947
948 /* Second Swap, back to Big Endian */
949#ifdef __BIG_ENDIAN
950 ar9300_swap_eeprom((struct ar9300_eeprom *)mptr);
951#endif
952 }
953 return false;
954}
955
/* XXX: review hardware docs */
/* Report the eeprom image version from the restored structure. */
static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
{
	return ah->eeprom.ar9300_eep.eepromVersion;
}
961
/* XXX: could be read from the eepromVersion, not sure yet */
/* Eeprom layout revision; hard-coded to 0 for now. */
static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
{
	return 0;
}
967
/* AR9300 exposes a single antenna configuration per band. */
static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
					     enum ieee80211_band freq_band)
{
	return 1;
}
973
/*
 * Antenna configuration words are not supported on AR9300.
 * NOTE(review): -EINVAL is returned through a u16, so callers actually
 * see a large positive value -- confirm callers treat any non-zero
 * result as "unsupported".
 */
static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
						  struct ath9k_channel *chan)
{
	return -EINVAL;
}
979
980static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
981{
982 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
983
984 if (is2ghz)
985 return eep->modalHeader2G.xpaBiasLvl;
986 else
987 return eep->modalHeader5G.xpaBiasLvl;
988}
989
/*
 * Program the XPA bias level from the eeprom: the low 2 bits go into
 * the CH0_TOP XPABIASLVL field, the next 2 bits into the CH0_THERM
 * spare field.
 */
static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
	int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
	REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
	REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
		      ((bias >> 2) & 0x3));
}
997
998static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
999{
1000 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1001
1002 if (is2ghz)
1003 return eep->modalHeader2G.antCtrlCommon;
1004 else
1005 return eep->modalHeader5G.antCtrlCommon;
1006}
1007
1008static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
1009{
1010 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1011
1012 if (is2ghz)
1013 return eep->modalHeader2G.antCtrlCommon2;
1014 else
1015 return eep->modalHeader5G.antCtrlCommon2;
1016}
1017
1018static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
1019 int chain,
1020 bool is2ghz)
1021{
1022 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1023
1024 if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
1025 if (is2ghz)
1026 return eep->modalHeader2G.antCtrlChain[chain];
1027 else
1028 return eep->modalHeader5G.antCtrlChain[chain];
1029 }
1030
1031 return 0;
1032}
1033
1034static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
1035{
1036 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
1037 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
1038
1039 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
1040 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
1041
1042 value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
1043 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
1044
1045 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
1046 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
1047
1048 value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
1049 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
1050}
1051
/*
 * If the eeprom's drive-strength flag is set (miscConfiguration bit 0,
 * "turn down drivestrength"), rewrite the chain-0 bias registers:
 * every 3-bit bias field covered by the cleared mask is forced to 0x5.
 */
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
{
	int drive_strength;
	unsigned long reg;

	drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);

	/* flag clear: leave the hardware default bias values alone */
	if (!drive_strength)
		return;

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
	reg &= ~0x00ffffc0;	/* clear bits 6..23: six 3-bit fields */
	reg |= 0x5 << 21;
	reg |= 0x5 << 18;
	reg |= 0x5 << 15;
	reg |= 0x5 << 12;
	reg |= 0x5 << 9;
	reg |= 0x5 << 6;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
	reg &= ~0xffffffe0;	/* clear bits 5..31: nine 3-bit fields */
	reg |= 0x5 << 29;
	reg |= 0x5 << 26;
	reg |= 0x5 << 23;
	reg |= 0x5 << 20;
	reg |= 0x5 << 17;
	reg |= 0x5 << 14;
	reg |= 0x5 << 11;
	reg |= 0x5 << 8;
	reg |= 0x5 << 5;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
	reg &= ~0xff800000;	/* clear bits 23..31: three 3-bit fields */
	reg |= 0x5 << 29;
	reg |= 0x5 << 26;
	reg |= 0x5 << 23;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
}
1092
/*
 * Configure the on-chip switching regulator.  When the eeprom enables
 * the internal regulator, the swreg eeprom value is programmed through
 * the REG_CONTROL registers (clear the program bit, write the value,
 * then set the program bit to latch it).  Otherwise, set
 * AR_RTC_FORCE_SWREG_PRD in the sleep-clock register.
 */
static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
	int internal_regulator =
		ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);

	if (internal_regulator) {
		/* Internal regulator is ON. Write swreg register. */
		int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
		REG_WRITE(ah, AR_RTC_REG_CONTROL1,
			  REG_READ(ah, AR_RTC_REG_CONTROL1) &
			  (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
		REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
		/* Set REG_CONTROL1.SWREG_PROGRAM */
		REG_WRITE(ah, AR_RTC_REG_CONTROL1,
			  REG_READ(ah,
				   AR_RTC_REG_CONTROL1) |
			  AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
	} else {
		REG_WRITE(ah, AR_RTC_SLEEP_CLK,
			  (REG_READ(ah,
				    AR_RTC_SLEEP_CLK) |
			   AR_RTC_FORCE_SWREG_PRD));
	}
}
1117
/*
 * Apply all board-specific EEPROM settings for the channel's band:
 * XPA bias, antenna control, drive strength and internal regulator.
 */
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
					     struct ath9k_channel *chan)
{
	ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
	ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
	ar9003_hw_drive_strength_apply(ah);
	ar9003_hw_internal_regulator_apply(ah);
}
1126
/*
 * Intentional no-op: the eeprom_ops vtable requires a set_addac hook,
 * but nothing is programmed here for the AR9300 family.
 */
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
				      struct ath9k_channel *chan)
{
}
1131
1132/*
1133 * Returns the interpolated y value corresponding to the specified x value
1134 * from the np ordered pairs of data (px,py).
1135 * The pairs do not have to be in any order.
1136 * If the specified x value is less than any of the px,
1137 * the returned y value is equal to the py for the lowest px.
1138 * If the specified x value is greater than any of the px,
1139 * the returned y value is equal to the py for the highest px.
1140 */
static int ar9003_hw_power_interpolate(int32_t x,
				       int32_t *px, int32_t *py, u_int16_t np)
{
	bool have_lo = false, have_hi = false;
	int x_lo = 0, y_lo = 0;
	int x_hi = 0, y_hi = 0;
	int i;

	/*
	 * Scan every calibration pair and remember the measurement
	 * closest to x from below (lo) and from above (hi).  A pair
	 * with px[i] == x qualifies for both.
	 */
	for (i = 0; i < np; i++) {
		int delta = x - px[i];

		if (delta <= 0 && (!have_hi || delta > (x - x_hi))) {
			x_hi = px[i];
			y_hi = py[i];
			have_hi = true;
		}
		if (delta >= 0 && (!have_lo || delta < (x - x_lo))) {
			x_lo = px[i];
			y_lo = py[i];
			have_lo = true;
		}
	}

	if (have_lo && have_hi) {
		/* Exact pier hit: nothing to blend */
		if (x_hi == x_lo)
			return y_lo;
		/* Linear interpolation between the two bracketing piers */
		return y_lo + (((x - x_lo) * (y_hi - y_lo)) / (x_hi - x_lo));
	}

	/* x lies above every pier: clamp to the highest one */
	if (have_lo)
		return y_lo;

	/* x lies below every pier: clamp to the lowest one */
	if (have_hi)
		return y_hi;

	/* No data at all (np == 0): return an obviously bogus value */
	return -(1 << 30);
}
1194
1195static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
1196 u16 rateIndex, u16 freq, bool is2GHz)
1197{
1198 u16 numPiers, i;
1199 s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
1200 s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
1201 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1202 struct cal_tgt_pow_legacy *pEepromTargetPwr;
1203 u8 *pFreqBin;
1204
1205 if (is2GHz) {
1206 numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
1207 pEepromTargetPwr = eep->calTargetPower2G;
1208 pFreqBin = eep->calTarget_freqbin_2G;
1209 } else {
1210 numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
1211 pEepromTargetPwr = eep->calTargetPower5G;
1212 pFreqBin = eep->calTarget_freqbin_5G;
1213 }
1214
1215 /*
1216 * create array of channels and targetpower from
1217 * targetpower piers stored on eeprom
1218 */
1219 for (i = 0; i < numPiers; i++) {
1220 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1221 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1222 }
1223
1224 /* interpolate to get target power for given frequency */
1225 return (u8) ar9003_hw_power_interpolate((s32) freq,
1226 freqArray,
1227 targetPowerArray, numPiers);
1228}
1229
1230static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
1231 u16 rateIndex,
1232 u16 freq, bool is2GHz)
1233{
1234 u16 numPiers, i;
1235 s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
1236 s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
1237 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1238 struct cal_tgt_pow_ht *pEepromTargetPwr;
1239 u8 *pFreqBin;
1240
1241 if (is2GHz) {
1242 numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
1243 pEepromTargetPwr = eep->calTargetPower2GHT20;
1244 pFreqBin = eep->calTarget_freqbin_2GHT20;
1245 } else {
1246 numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
1247 pEepromTargetPwr = eep->calTargetPower5GHT20;
1248 pFreqBin = eep->calTarget_freqbin_5GHT20;
1249 }
1250
1251 /*
1252 * create array of channels and targetpower
1253 * from targetpower piers stored on eeprom
1254 */
1255 for (i = 0; i < numPiers; i++) {
1256 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1257 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1258 }
1259
1260 /* interpolate to get target power for given frequency */
1261 return (u8) ar9003_hw_power_interpolate((s32) freq,
1262 freqArray,
1263 targetPowerArray, numPiers);
1264}
1265
1266static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
1267 u16 rateIndex,
1268 u16 freq, bool is2GHz)
1269{
1270 u16 numPiers, i;
1271 s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
1272 s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
1273 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1274 struct cal_tgt_pow_ht *pEepromTargetPwr;
1275 u8 *pFreqBin;
1276
1277 if (is2GHz) {
1278 numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
1279 pEepromTargetPwr = eep->calTargetPower2GHT40;
1280 pFreqBin = eep->calTarget_freqbin_2GHT40;
1281 } else {
1282 numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
1283 pEepromTargetPwr = eep->calTargetPower5GHT40;
1284 pFreqBin = eep->calTarget_freqbin_5GHT40;
1285 }
1286
1287 /*
1288 * create array of channels and targetpower from
1289 * targetpower piers stored on eeprom
1290 */
1291 for (i = 0; i < numPiers; i++) {
1292 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1293 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1294 }
1295
1296 /* interpolate to get target power for given frequency */
1297 return (u8) ar9003_hw_power_interpolate((s32) freq,
1298 freqArray,
1299 targetPowerArray, numPiers);
1300}
1301
1302static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
1303 u16 rateIndex, u16 freq)
1304{
1305 u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
1306 s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
1307 s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
1308 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1309 struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
1310 u8 *pFreqBin = eep->calTarget_freqbin_Cck;
1311
1312 /*
1313 * create array of channels and targetpower from
1314 * targetpower piers stored on eeprom
1315 */
1316 for (i = 0; i < numPiers; i++) {
1317 freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
1318 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1319 }
1320
1321 /* interpolate to get target power for given frequency */
1322 return (u8) ar9003_hw_power_interpolate((s32) freq,
1323 freqArray,
1324 targetPowerArray, numPiers);
1325}
1326
/*
 * Write the per-rate target power array (half-dB units, one u8 per
 * ar9300_Rates entry) into the chip's tx power registers.  Each 32-bit
 * register packs four 6-bit power values.  Always returns 0.
 */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
	/* make sure forced gain is not set */
	REG_WRITE(ah, 0xa458, 0);

	/* Write the OFDM power per rate set */

	/* 6 (LSB), 9, 12, 18 (MSB) */
	REG_WRITE(ah, 0xa3c0,
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* 24 (LSB), 36, 48, 54 (MSB) */
	REG_WRITE(ah, 0xa3c4,
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* Write the CCK power per rate set */

	/* 1L (LSB), reserved, 2L, 2S (MSB) */
	REG_WRITE(ah, 0xa3c8,
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
		  /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));

	/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
	REG_WRITE(ah, 0xa3cc,
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
	    );

	/* Write the HT20 power per rate set */

	/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
	REG_WRITE(ah, 0xa3d0,
		  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, 0xa3d4,
		  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, 0xa3e4,
		  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
	    );

	/* Mixed HT20 and HT40 rates */

	/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
	REG_WRITE(ah, 0xa3e8,
		  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
	    );

	/*
	 * Write the HT40 power per rate set
	 * correct PAR difference between HT40 and HT20/LEGACY
	 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
	 */
	REG_WRITE(ah, 0xa3d8,
		  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, 0xa3dc,
		  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, 0xa3ec,
		  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
	    );

	return 0;
#undef POW_SM
}
1434
1435static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
1436{
1437 u8 targetPowerValT2[ar9300RateSize];
1438 /* XXX: hard code for now, need to get from eeprom struct */
1439 u8 ht40PowerIncForPdadc = 0;
1440 bool is2GHz = false;
1441 unsigned int i = 0;
1442 struct ath_common *common = ath9k_hw_common(ah);
1443
1444 if (freq < 4000)
1445 is2GHz = true;
1446
1447 targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
1448 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
1449 is2GHz);
1450 targetPowerValT2[ALL_TARGET_LEGACY_36] =
1451 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
1452 is2GHz);
1453 targetPowerValT2[ALL_TARGET_LEGACY_48] =
1454 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
1455 is2GHz);
1456 targetPowerValT2[ALL_TARGET_LEGACY_54] =
1457 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
1458 is2GHz);
1459 targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
1460 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
1461 freq);
1462 targetPowerValT2[ALL_TARGET_LEGACY_5S] =
1463 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
1464 targetPowerValT2[ALL_TARGET_LEGACY_11L] =
1465 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
1466 targetPowerValT2[ALL_TARGET_LEGACY_11S] =
1467 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
1468 targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
1469 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
1470 is2GHz);
1471 targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
1472 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
1473 freq, is2GHz);
1474 targetPowerValT2[ALL_TARGET_HT20_4] =
1475 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
1476 is2GHz);
1477 targetPowerValT2[ALL_TARGET_HT20_5] =
1478 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
1479 is2GHz);
1480 targetPowerValT2[ALL_TARGET_HT20_6] =
1481 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
1482 is2GHz);
1483 targetPowerValT2[ALL_TARGET_HT20_7] =
1484 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
1485 is2GHz);
1486 targetPowerValT2[ALL_TARGET_HT20_12] =
1487 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
1488 is2GHz);
1489 targetPowerValT2[ALL_TARGET_HT20_13] =
1490 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
1491 is2GHz);
1492 targetPowerValT2[ALL_TARGET_HT20_14] =
1493 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
1494 is2GHz);
1495 targetPowerValT2[ALL_TARGET_HT20_15] =
1496 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
1497 is2GHz);
1498 targetPowerValT2[ALL_TARGET_HT20_20] =
1499 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
1500 is2GHz);
1501 targetPowerValT2[ALL_TARGET_HT20_21] =
1502 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
1503 is2GHz);
1504 targetPowerValT2[ALL_TARGET_HT20_22] =
1505 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
1506 is2GHz);
1507 targetPowerValT2[ALL_TARGET_HT20_23] =
1508 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
1509 is2GHz);
1510 targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
1511 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
1512 is2GHz) + ht40PowerIncForPdadc;
1513 targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
1514 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
1515 freq,
1516 is2GHz) + ht40PowerIncForPdadc;
1517 targetPowerValT2[ALL_TARGET_HT40_4] =
1518 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
1519 is2GHz) + ht40PowerIncForPdadc;
1520 targetPowerValT2[ALL_TARGET_HT40_5] =
1521 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
1522 is2GHz) + ht40PowerIncForPdadc;
1523 targetPowerValT2[ALL_TARGET_HT40_6] =
1524 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
1525 is2GHz) + ht40PowerIncForPdadc;
1526 targetPowerValT2[ALL_TARGET_HT40_7] =
1527 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
1528 is2GHz) + ht40PowerIncForPdadc;
1529 targetPowerValT2[ALL_TARGET_HT40_12] =
1530 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
1531 is2GHz) + ht40PowerIncForPdadc;
1532 targetPowerValT2[ALL_TARGET_HT40_13] =
1533 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
1534 is2GHz) + ht40PowerIncForPdadc;
1535 targetPowerValT2[ALL_TARGET_HT40_14] =
1536 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
1537 is2GHz) + ht40PowerIncForPdadc;
1538 targetPowerValT2[ALL_TARGET_HT40_15] =
1539 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
1540 is2GHz) + ht40PowerIncForPdadc;
1541 targetPowerValT2[ALL_TARGET_HT40_20] =
1542 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
1543 is2GHz) + ht40PowerIncForPdadc;
1544 targetPowerValT2[ALL_TARGET_HT40_21] =
1545 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
1546 is2GHz) + ht40PowerIncForPdadc;
1547 targetPowerValT2[ALL_TARGET_HT40_22] =
1548 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
1549 is2GHz) + ht40PowerIncForPdadc;
1550 targetPowerValT2[ALL_TARGET_HT40_23] =
1551 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
1552 is2GHz) + ht40PowerIncForPdadc;
1553
1554 while (i < ar9300RateSize) {
1555 ath_print(common, ATH_DBG_EEPROM,
1556 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1557 i++;
1558
1559 ath_print(common, ATH_DBG_EEPROM,
1560 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1561 i++;
1562
1563 ath_print(common, ATH_DBG_EEPROM,
1564 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1565 i++;
1566
1567 ath_print(common, ATH_DBG_EEPROM,
1568 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
1569 i++;
1570 }
1571
1572 /* Write target power array to registers */
1573 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
1574}
1575
/*
 * Fetch one calibration pier for the given band (mode: 0 = 2GHz,
 * non-zero = 5GHz), pier index and chain.  On success, returns 0 and
 * fills in the pier's frequency plus its recorded power correction,
 * temperature and voltage measurements.  Returns -1 when the chain or
 * pier index is out of range.
 */
static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
				  int mode,
				  int ipier,
				  int ichain,
				  int *pfrequency,
				  int *pcorrection,
				  int *ptemperature, int *pvoltage)
{
	u8 *pCalPier;
	struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
	int is2GHz;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ath_common *common = ath9k_hw_common(ah);

	if (ichain >= AR9300_MAX_CHAINS) {
		ath_print(common, ATH_DBG_EEPROM,
			  "Invalid chain index, must be less than %d\n",
			  AR9300_MAX_CHAINS);
		return -1;
	}

	if (mode) {		/* 5GHz */
		if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
			ath_print(common, ATH_DBG_EEPROM,
				  "Invalid 5GHz cal pier index, must "
				  "be less than %d\n",
				  AR9300_NUM_5G_CAL_PIERS);
			return -1;
		}
		pCalPier = &(eep->calFreqPier5G[ipier]);
		pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
		is2GHz = 0;
	} else {
		if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
			ath_print(common, ATH_DBG_EEPROM,
				  "Invalid 2GHz cal pier index, must "
				  "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
			return -1;
		}

		pCalPier = &(eep->calFreqPier2G[ipier]);
		pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
		is2GHz = 1;
	}

	/* Decode the frequency bin and copy out the measurements */
	*pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
	*pcorrection = pCalPierStruct->refPower;
	*ptemperature = pCalPierStruct->tempMeas;
	*pvoltage = pCalPierStruct->voltMeas;

	return 0;
}
1628
/*
 * Program the interpolated calibration values into the open-loop power
 * control registers: per-chain gain deltas, error-estimate mode, and
 * temperature compensation slope/reference.  Always returns 0.
 *
 * NOTE(review): the voltage argument is accepted but never read here —
 * confirm whether voltage compensation is intentionally unimplemented.
 */
static int ar9003_hw_power_control_override(struct ath_hw *ah,
					    int frequency,
					    int *correction,
					    int *voltage, int *temperature)
{
	int tempSlope = 0;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;

	/* Per-chain open-loop gain deltas from the cal piers */
	REG_RMW(ah, AR_PHY_TPC_11_B0,
		(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
		AR_PHY_TPC_OLPC_GAIN_DELTA);
	REG_RMW(ah, AR_PHY_TPC_11_B1,
		(correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
		AR_PHY_TPC_OLPC_GAIN_DELTA);
	REG_RMW(ah, AR_PHY_TPC_11_B2,
		(correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
		AR_PHY_TPC_OLPC_GAIN_DELTA);

	/* enable open loop power control on chip */
	REG_RMW(ah, AR_PHY_TPC_6_B0,
		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
		AR_PHY_TPC_6_ERROR_EST_MODE);
	REG_RMW(ah, AR_PHY_TPC_6_B1,
		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
		AR_PHY_TPC_6_ERROR_EST_MODE);
	REG_RMW(ah, AR_PHY_TPC_6_B2,
		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
		AR_PHY_TPC_6_ERROR_EST_MODE);

	/*
	 * enable temperature compensation
	 * Need to use register names
	 */
	if (frequency < 4000)
		tempSlope = eep->modalHeader2G.tempSlope;
	else
		tempSlope = eep->modalHeader5G.tempSlope;

	REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
	REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
		      temperature[0]);

	return 0;
}
1673
/*
 * Apply the recorded correction values.
 *
 * For every chain, scan the calibration piers of the channel's band for
 * the best measurement at or below the target frequency and the best at
 * or above it, linearly interpolate correction/temperature/voltage
 * between them (clamping to the nearest pier when only one side is
 * within 1000 MHz), and program the result via
 * ar9003_hw_power_control_override().  Always returns 0.
 */
static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
{
	int ichain, ipier, npier;
	int mode;
	int lfrequency[AR9300_MAX_CHAINS],
	    lcorrection[AR9300_MAX_CHAINS],
	    ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
	int hfrequency[AR9300_MAX_CHAINS],
	    hcorrection[AR9300_MAX_CHAINS],
	    htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
	int fdiff;
	int correction[AR9300_MAX_CHAINS],
	    voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
	int pfrequency, pcorrection, ptemperature, pvoltage;
	struct ath_common *common = ath9k_hw_common(ah);

	mode = (frequency >= 4000);
	if (mode)
		npier = AR9300_NUM_5G_CAL_PIERS;
	else
		npier = AR9300_NUM_2G_CAL_PIERS;

	/* Sentinels: 0 = no lower pier found, 100000 = no higher pier found */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		lfrequency[ichain] = 0;
		hfrequency[ichain] = 100000;
	}
	/* identify best lower and higher frequency calibration measurement */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		for (ipier = 0; ipier < npier; ipier++) {
			if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
						    &pfrequency, &pcorrection,
						    &ptemperature, &pvoltage)) {
				fdiff = frequency - pfrequency;

				/*
				 * this measurement is higher than
				 * our desired frequency
				 */
				if (fdiff <= 0) {
					if (hfrequency[ichain] <= 0 ||
					    hfrequency[ichain] >= 100000 ||
					    fdiff >
					    (frequency - hfrequency[ichain])) {
						/*
						 * new best higher
						 * frequency measurement
						 */
						hfrequency[ichain] = pfrequency;
						hcorrection[ichain] =
						    pcorrection;
						htemperature[ichain] =
						    ptemperature;
						hvoltage[ichain] = pvoltage;
					}
				}
				if (fdiff >= 0) {
					if (lfrequency[ichain] <= 0
					    || fdiff <
					    (frequency - lfrequency[ichain])) {
						/*
						 * new best lower
						 * frequency measurement
						 */
						lfrequency[ichain] = pfrequency;
						lcorrection[ichain] =
						    pcorrection;
						ltemperature[ichain] =
						    ptemperature;
						lvoltage[ichain] = pvoltage;
					}
				}
			}
		}
	}

	/* interpolate */
	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
		ath_print(common, ATH_DBG_EEPROM,
			  "ch=%d f=%d low=%d %d h=%d %d\n",
			  ichain, frequency, lfrequency[ichain],
			  lcorrection[ichain], hfrequency[ichain],
			  hcorrection[ichain]);
		/* they're the same, so just pick one */
		if (hfrequency[ichain] == lfrequency[ichain]) {
			correction[ichain] = lcorrection[ichain];
			voltage[ichain] = lvoltage[ichain];
			temperature[ichain] = ltemperature[ichain];
		}
		/* the low frequency is good */
		else if (frequency - lfrequency[ichain] < 1000) {
			/* so is the high frequency, interpolate */
			if (hfrequency[ichain] - frequency < 1000) {

				correction[ichain] = lcorrection[ichain] +
				    (((frequency - lfrequency[ichain]) *
				      (hcorrection[ichain] -
				       lcorrection[ichain])) /
				     (hfrequency[ichain] - lfrequency[ichain]));

				temperature[ichain] = ltemperature[ichain] +
				    (((frequency - lfrequency[ichain]) *
				      (htemperature[ichain] -
				       ltemperature[ichain])) /
				     (hfrequency[ichain] - lfrequency[ichain]));

				voltage[ichain] =
				    lvoltage[ichain] +
				    (((frequency -
				       lfrequency[ichain]) * (hvoltage[ichain] -
							      lvoltage[ichain]))
				     / (hfrequency[ichain] -
					lfrequency[ichain]));
			}
			/* only low is good, use it */
			else {
				correction[ichain] = lcorrection[ichain];
				temperature[ichain] = ltemperature[ichain];
				voltage[ichain] = lvoltage[ichain];
			}
		}
		/* only high is good, use it */
		else if (hfrequency[ichain] - frequency < 1000) {
			correction[ichain] = hcorrection[ichain];
			temperature[ichain] = htemperature[ichain];
			voltage[ichain] = hvoltage[ichain];
		} else {	/* nothing is good, presume 0???? */
			correction[ichain] = 0;
			temperature[ichain] = 0;
			voltage[ichain] = 0;
		}
	}

	ar9003_hw_power_control_override(ah, frequency, correction, voltage,
					 temperature);

	ath_print(common, ATH_DBG_EEPROM,
		  "for frequency=%d, calibration correction = %d %d %d\n",
		  frequency, correction[0], correction[1], correction[2]);

	return 0;
}
1816
/*
 * eeprom_ops.set_txpower: record the power limit, program the per-rate
 * target powers for the channel, then apply calibration corrections.
 *
 * NOTE(review): cfgCtl, twiceAntennaReduction and
 * twiceMaxRegulatoryPower are accepted but not yet used here — only
 * powerLimit is recorded.  Confirm whether CTL/regulatory clamping is
 * still to come.
 */
static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
					struct ath9k_channel *chan, u16 cfgCtl,
					u8 twiceAntennaReduction,
					u8 twiceMaxRegulatoryPower,
					u8 powerLimit)
{
	ah->txpower_limit = powerLimit;
	ar9003_hw_set_target_power_eeprom(ah, chan->channel);
	ar9003_hw_calibration_apply(ah, chan->channel);
}
1827
/*
 * eeprom_ops.get_spur_channel: always reports "no spur channel" —
 * EEPROM-based spur data is not used for AR9300.
 */
static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
					    u16 i, bool is2GHz)
{
	return AR_NO_SPUR;
}
1833
1834s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
1835{
1836 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1837
1838 return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
1839}
1840
1841s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
1842{
1843 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1844
1845 return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
1846}
1847
/* EEPROM operations vtable for the AR9300 hardware family */
const struct eeprom_ops eep_ar9300_ops = {
	.check_eeprom = ath9k_hw_ar9300_check_eeprom,
	.get_eeprom = ath9k_hw_ar9300_get_eeprom,
	.fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
	.get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
	.get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
	.get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
	.get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
	.set_board_values = ath9k_hw_ar9300_set_board_values,
	.set_addac = ath9k_hw_ar9300_set_addac,
	.set_txpower = ath9k_hw_ar9300_set_txpower,
	.get_spur_channel = ath9k_hw_ar9300_get_spur_channel
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 000000000000..d8c0318f416f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
1#ifndef AR9003_EEPROM_H
2#define AR9003_EEPROM_H
3
4#include <linux/types.h>
5
6#define AR9300_EEP_VER 0xD000
7#define AR9300_EEP_VER_MINOR_MASK 0xFFF
8#define AR9300_EEP_MINOR_VER_1 0x1
9#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
10
11/* 16-bit offset location start of calibration struct */
12#define AR9300_EEP_START_LOC 256
13#define AR9300_NUM_5G_CAL_PIERS 8
14#define AR9300_NUM_2G_CAL_PIERS 3
15#define AR9300_NUM_5G_20_TARGET_POWERS 8
16#define AR9300_NUM_5G_40_TARGET_POWERS 8
17#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
18#define AR9300_NUM_2G_20_TARGET_POWERS 3
19#define AR9300_NUM_2G_40_TARGET_POWERS 3
20/* #define AR9300_NUM_CTLS 21 */
21#define AR9300_NUM_CTLS_5G 9
22#define AR9300_NUM_CTLS_2G 12
23#define AR9300_CTL_MODE_M 0xF
24#define AR9300_NUM_BAND_EDGES_5G 8
25#define AR9300_NUM_BAND_EDGES_2G 4
26#define AR9300_NUM_PD_GAINS 4
27#define AR9300_PD_GAINS_IN_MASK 4
28#define AR9300_PD_GAIN_ICEPTS 5
29#define AR9300_EEPROM_MODAL_SPURS 5
30#define AR9300_MAX_RATE_POWER 63
31#define AR9300_NUM_PDADC_VALUES 128
32#define AR9300_NUM_RATES 16
33#define AR9300_BCHAN_UNUSED 0xFF
34#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
35#define AR9300_OPFLAGS_11A 0x01
36#define AR9300_OPFLAGS_11G 0x02
37#define AR9300_OPFLAGS_5G_HT40 0x04
38#define AR9300_OPFLAGS_2G_HT40 0x08
39#define AR9300_OPFLAGS_5G_HT20 0x10
40#define AR9300_OPFLAGS_2G_HT20 0x20
41#define AR9300_EEPMISC_BIG_ENDIAN 0x01
42#define AR9300_EEPMISC_WOW 0x02
43#define AR9300_CUSTOMER_DATA_SIZE 20
44
/*
 * Convert between a channel frequency (MHz) and its 8-bit EEPROM
 * frequency bin: 2 GHz bins are 1 MHz steps from 2300, 5 GHz bins are
 * 5 MHz steps from 4800.  Every macro argument is parenthesized —
 * FBIN2FREQ previously expanded 5 * x unparenthesized, so an
 * expression argument (e.g. a + b) produced the wrong frequency.
 */
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + (x)) : (4800 + 5 * (x)))
47#define AR9300_MAX_CHAINS 3
48#define AR9300_ANT_16S 25
49#define AR9300_FUTURE_MODAL_SZ 6
50
51#define AR9300_NUM_ANT_CHAIN_FIELDS 7
52#define AR9300_NUM_ANT_COMMON_FIELDS 4
53#define AR9300_SIZE_ANT_CHAIN_FIELD 3
54#define AR9300_SIZE_ANT_COMMON_FIELD 4
55#define AR9300_ANT_CHAIN_MASK 0x7
56#define AR9300_ANT_COMMON_MASK 0xf
57#define AR9300_CHAIN_0_IDX 0
58#define AR9300_CHAIN_1_IDX 1
59#define AR9300_CHAIN_2_IDX 2
60
61#define AR928X_NUM_ANT_CHAIN_FIELDS 6
62#define AR928X_SIZE_ANT_CHAIN_FIELD 2
63#define AR928X_ANT_CHAIN_MASK 0x3
64
65/* Delta from which to start power to pdadc table */
66/* This offset is used in both open loop and closed loop power control
67 * schemes. In open loop power control, it is not really needed, but for
68 * the "sake of consistency" it was kept. For certain AP designs, this
69 * value is overwritten by the value in the flag "pwrTableOffset" just
70 * before writing the pdadc vs pwr into the chip registers.
71 */
72#define AR9300_PWR_TABLE_OFFSET 0
73
74/* enable flags for voltage and temp compensation */
75#define ENABLE_TEMP_COMPENSATION 0x01
76#define ENABLE_VOLT_COMPENSATION 0x02
77/* byte addressable */
78#define AR9300_EEPROM_SIZE (16*1024)
79#define FIXED_CCA_THRESHOLD 15
80
81#define AR9300_BASE_ADDR 0x3ff
82
/*
 * Index of each HT (MCS) rate group within a cal_tgt_pow_ht.tPow2x
 * entry.  MCS rates that share one stored power value are folded into
 * a single index (e.g. 0/8/16 or 1-3/9-11/17-19).
 */
enum targetPowerHTRates {
	HT_TARGET_RATE_0_8_16,
	HT_TARGET_RATE_1_3_9_11_17_19,
	HT_TARGET_RATE_4,
	HT_TARGET_RATE_5,
	HT_TARGET_RATE_6,
	HT_TARGET_RATE_7,
	HT_TARGET_RATE_12,
	HT_TARGET_RATE_13,
	HT_TARGET_RATE_14,
	HT_TARGET_RATE_15,
	HT_TARGET_RATE_20,
	HT_TARGET_RATE_21,
	HT_TARGET_RATE_22,
	HT_TARGET_RATE_23
};
99
/*
 * Index of each legacy OFDM rate group within a
 * cal_tgt_pow_legacy.tPow2x entry; 6-24 Mbps share one value.
 */
enum targetPowerLegacyRates {
	LEGACY_TARGET_RATE_6_24,
	LEGACY_TARGET_RATE_36,
	LEGACY_TARGET_RATE_48,
	LEGACY_TARGET_RATE_54
};
106
/*
 * Index of each CCK rate group within a cal_tgt_pow_legacy.tPow2x
 * entry; 1 Mbps and 5.5 Mbps long-preamble share one value.
 */
enum targetPowerCckRates {
	LEGACY_TARGET_RATE_1L_5L,
	LEGACY_TARGET_RATE_5S,
	LEGACY_TARGET_RATE_11L,
	LEGACY_TARGET_RATE_11S
};
113
/*
 * Flattened index of every rate entry in the combined target-power
 * table (legacy OFDM, CCK, HT20, HT40).  The trailing ar9300RateSize
 * is the total entry count and sizes the table.
 */
enum ar9300_Rates {
	ALL_TARGET_LEGACY_6_24,
	ALL_TARGET_LEGACY_36,
	ALL_TARGET_LEGACY_48,
	ALL_TARGET_LEGACY_54,
	ALL_TARGET_LEGACY_1L_5L,
	ALL_TARGET_LEGACY_5S,
	ALL_TARGET_LEGACY_11L,
	ALL_TARGET_LEGACY_11S,
	ALL_TARGET_HT20_0_8_16,
	ALL_TARGET_HT20_1_3_9_11_17_19,
	ALL_TARGET_HT20_4,
	ALL_TARGET_HT20_5,
	ALL_TARGET_HT20_6,
	ALL_TARGET_HT20_7,
	ALL_TARGET_HT20_12,
	ALL_TARGET_HT20_13,
	ALL_TARGET_HT20_14,
	ALL_TARGET_HT20_15,
	ALL_TARGET_HT20_20,
	ALL_TARGET_HT20_21,
	ALL_TARGET_HT20_22,
	ALL_TARGET_HT20_23,
	ALL_TARGET_HT40_0_8_16,
	ALL_TARGET_HT40_1_3_9_11_17_19,
	ALL_TARGET_HT40_4,
	ALL_TARGET_HT40_5,
	ALL_TARGET_HT40_6,
	ALL_TARGET_HT40_7,
	ALL_TARGET_HT40_12,
	ALL_TARGET_HT40_13,
	ALL_TARGET_HT40_14,
	ALL_TARGET_HT40_15,
	ALL_TARGET_HT40_20,
	ALL_TARGET_HT40_21,
	ALL_TARGET_HT40_22,
	ALL_TARGET_HT40_23,
	ar9300RateSize,
};
153
154
/* Packed pair of flag bytes from the EEPROM base header */
struct eepFlags {
	u8 opFlags;	/* presumably AR9300_OPFLAGS_* bits — confirm */
	u8 eepMisc;	/* presumably AR9300_EEPMISC_* bits — confirm */
} __packed;
159
/*
 * Compression algorithm identifiers for EEPROM data blocks.
 * NOTE(review): identifiers beginning with '_' + uppercase are reserved
 * for the C implementation; kept as-is to avoid breaking callers.
 */
enum CompressAlgorithm {
	_CompressNone = 0,
	_CompressLzma,
	_CompressPairs,
	_CompressBlock,
	_Compress4,
	_Compress5,
	_Compress6,
	_Compress7,
};
170
/* Band-independent portion of the on-flash AR9300 EEPROM header */
struct ar9300_base_eep_hdr {
	u16 regDmn[2];
	/* 4 bits tx and 4 bits rx */
	u8 txrxMask;
	struct eepFlags opCapFlags;
	u8 rfSilent;
	u8 blueToothOptions;
	u8 deviceCap;
	/* takes lower byte in eeprom location */
	u8 deviceType;
	/* offset in dB to be added to beginning
	 * of pdadc table in calibration
	 */
	int8_t pwrTableOffset;
	u8 params_for_tuning_caps[2];
	/*
	 * bit0 - enable tx temp comp
	 * bit1 - enable tx volt comp
	 * bit2 - enable fastClock - default to 1
	 * bit3 - enable doubling - default to 1
	 * bit4 - enable internal regulator - default to 1
	 */
	u8 featureEnable;
	/* misc flags: bit0 - turn down drivestrength */
	u8 miscConfiguration;
	u8 eepromWriteEnableGpio;
	u8 wlanDisableGpio;
	u8 wlanLedGpio;
	u8 rxBandSelectGpio;
	/* tx gain table index in bits 7:4, rx gain table index in 3:0 */
	u8 txrxgain;
	/* SW controlled internal regulator fields */
	u32 swreg;
} __packed;
204
/* Per-band (2G or 5G) modal settings from the EEPROM */
struct ar9300_modal_eep_header {
	/* 4 idle, t1, t2, b (4 bits per setting) */
	u32 antCtrlCommon;
	/* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
	u32 antCtrlCommon2;
	/* 6 idle, t, r, rx1, rx12, b (2 bits each) */
	u16 antCtrlChain[AR9300_MAX_CHAINS];
	/* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
	u8 xatten1DB[AR9300_MAX_CHAINS];
	/* 3 xatten1_margin for merlin (0xa20c/b20c 16:12 */
	u8 xatten1Margin[AR9300_MAX_CHAINS];
	/* slope for temperature compensation (AR_PHY_TPC_19_ALPHA_THERM) */
	int8_t tempSlope;
	int8_t voltSlope;
	/* spur channels in usual fbin coding format */
	u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
	/* 3 Check if the register is per chain */
	int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
	u8 ob[AR9300_MAX_CHAINS];
	u8 db_stage2[AR9300_MAX_CHAINS];
	u8 db_stage3[AR9300_MAX_CHAINS];
	u8 db_stage4[AR9300_MAX_CHAINS];
	u8 xpaBiasLvl;
	u8 txFrameToDataStart;
	u8 txFrameToPaOn;
	u8 txClip;
	int8_t antennaGain;
	u8 switchSettling;
	int8_t adcDesiredSize;
	u8 txEndToXpaOff;
	u8 txEndToRxOn;
	u8 txFrameToXpaOn;
	u8 thresh62;
	u8 futureModal[32];
} __packed;
239
/* One calibration pier's measurements, open-loop power control format */
struct ar9300_cal_data_per_freq_op_loop {
	int8_t refPower;
	/* pdadc voltage at power measurement */
	u8 voltMeas;
	/* pcdac used for power measurement */
	u8 tempMeas;
	/* range is -60 to -127 create a mapping equation 1db resolution */
	int8_t rxNoisefloorCal;
	/*range is same as noisefloor */
	int8_t rxNoisefloorPower;
	/* temp measured when noisefloor cal was performed */
	u8 rxTempMeas;
} __packed;
253
/* Target powers for the 4 legacy/CCK rate groups at one frequency pier
 * (tPow2x: presumably power in half-dB units — confirm) */
struct cal_tgt_pow_legacy {
	u8 tPow2x[4];
} __packed;

/* Target powers for the 14 HT rate groups at one frequency pier */
struct cal_tgt_pow_ht {
	u8 tPow2x[14];
} __packed;

/* One conformance-test-limit band edge: 6-bit power plus 2 flag bits */
struct cal_ctl_edge_pwr {
	u8 tPower:6,
	   flag:2;
} __packed;

/* CTL band-edge arrays, sized per band */
struct cal_ctl_data_2g {
	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
} __packed;

struct cal_ctl_data_5g {
	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
274
/*
 * Complete on-flash AR9300 EEPROM image layout: base header, then the
 * 2 GHz modal header / cal piers / target powers / CTLs, then the same
 * set for 5 GHz.  Field order and packing mirror the flash contents.
 */
struct ar9300_eeprom {
	u8 eepromVersion;
	u8 templateVersion;
	u8 macAddr[6];
	u8 custData[AR9300_CUSTOMER_DATA_SIZE];

	struct ar9300_base_eep_hdr baseEepHeader;

	struct ar9300_modal_eep_header modalHeader2G;
	u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
	struct ar9300_cal_data_per_freq_op_loop
	 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
	u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
	u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
	u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
	u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
	struct cal_tgt_pow_legacy
	 calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
	struct cal_tgt_pow_legacy
	 calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
	struct cal_tgt_pow_ht
	 calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
	struct cal_tgt_pow_ht
	 calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
	u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
	u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
	struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
	struct ar9300_modal_eep_header modalHeader5G;
	u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
	struct ar9300_cal_data_per_freq_op_loop
	 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
	u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
	u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
	u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
	struct cal_tgt_pow_legacy
	 calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
	struct cal_tgt_pow_ht
	 calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
	struct cal_tgt_pow_ht
	 calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
	u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
	u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
	struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
} __packed;
319
320s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
321s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
322
323#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 000000000000..b15309caf1da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_mac.h"
19#include "ar9003_initvals.h"
20
/* General hardware code for the AR9003 hardware family */
22
23static bool ar9003_hw_macversion_supported(u32 macversion)
24{
25 switch (macversion) {
26 case AR_SREV_VERSION_9300:
27 return true;
28 default:
29 break;
30 }
31 return false;
32}
33
34/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
35/*
36 * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
37 * ensuring it does not affect hardware bring up
38 */
/*
 * Register the AR9003 2.0 INI (initial register value) tables on the
 * ath_hw state: pre/core/post arrays for the MAC, baseband, radio and
 * SoC subsystems, the default RX/TX gain tables, the PCIe SERDES
 * awake/sleep settings and the fast-clock modal table.  Slots a
 * subsystem does not use are registered as empty (NULL, 0, 0).
 * The trailing INIT_INI_ARRAY argument is the table's column count.
 */
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
	/* mac */
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
			ar9300_2p0_mac_core,
			ARRAY_SIZE(ar9300_2p0_mac_core), 2);
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
			ar9300_2p0_mac_postamble,
			ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);

	/* bb */
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
			ar9300_2p0_baseband_core,
			ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
			ar9300_2p0_baseband_postamble,
			ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);

	/* radio */
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
			ar9300_2p0_radio_core,
			ARRAY_SIZE(ar9300_2p0_radio_core), 2);
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
			ar9300_2p0_radio_postamble,
			ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);

	/* soc — the only subsystem with a preamble (and no core) table */
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
			ar9300_2p0_soc_preamble,
			ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
			ar9300_2p0_soc_postamble,
			ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);

	/* rx/tx gain — defaults; may be overridden later from EEPROM
	 * settings by ar9003_hw_init_mode_gain_regs() */
	INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9300Common_rx_gain_table_2p0,
			ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
	INIT_INI_ARRAY(&ah->iniModesTxGain,
			ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
			ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
			5);

	/* Load PCIE SERDES settings from INI */

	/* Awake Setting */

	INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
			ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
			2);

	/* Sleep Setting */

	INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
			ar9300PciePhy_clkreq_enable_L1_2p0,
			ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
			2);

	/* Fast clock modal settings */
	INIT_INI_ARRAY(&ah->iniModesAdditional,
			ar9300Modes_fast_clock_2p0,
			ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
			3);
}
108
/*
 * Select the TX gain INI table according to the TX gain index
 * returned by ar9003_hw_get_tx_gain_idx().  Index 0 — and any
 * unrecognized index — selects the lowest ob/db table.
 */
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
{
	switch (ar9003_hw_get_tx_gain_idx(ah)) {
	case 0:
	default:
		INIT_INI_ARRAY(&ah->iniModesTxGain,
			ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
			ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
			5);
		break;
	case 1:
		INIT_INI_ARRAY(&ah->iniModesTxGain,
			ar9300Modes_high_ob_db_tx_gain_table_2p0,
			ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
			5);
		break;
	case 2:
		INIT_INI_ARRAY(&ah->iniModesTxGain,
			ar9300Modes_low_ob_db_tx_gain_table_2p0,
			ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
			5);
		break;
	}
}
133
/*
 * Select the RX gain INI table according to the RX gain index
 * returned by ar9003_hw_get_rx_gain_idx().  Index 0 — and any
 * unrecognized index — selects the common table; index 1 selects
 * the table for boards without an external LNA (wo_xlna).
 */
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
{
	switch (ar9003_hw_get_rx_gain_idx(ah)) {
	case 0:
	default:
		INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
				ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
				2);
		break;
	case 1:
		INIT_INI_ARRAY(&ah->iniModesRxGain,
				ar9300Common_wo_xlna_rx_gain_table_2p0,
				ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
				2);
		break;
	}
}
151
/* Set the TX and RX gain table pointers according to the gain-table
 * indices read from the EEPROM (overrides the defaults installed by
 * ar9003_hw_init_mode_regs()). */
static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
{
	ar9003_tx_gain_table_apply(ah);
	ar9003_rx_gain_table_apply(ah);
}
158
159/*
160 * Helper for ASPM support.
161 *
162 * Disable PLL when in L0s as well as receiver clock when in L1.
163 * This power saving option must be enabled through the SerDes.
164 *
165 * Programming the SerDes must go through the same 288 bit serial shift
166 * register as the other analog registers. Hence the 9 writes.
167 */
168static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
169 int restore,
170 int power_off)
171{
172 if (ah->is_pciexpress != true)
173 return;
174
175 /* Do not touch SerDes registers */
176 if (ah->config.pcie_powersave_enable == 2)
177 return;
178
179 /* Nothing to do on restore for 11N */
180 if (!restore) {
181 /* set bit 19 to allow forcing of pcie core into L1 state */
182 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
183
184 /* Several PCIe massages to ensure proper behaviour */
185 if (ah->config.pcie_waen)
186 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
187 }
188}
189
/* Sets up the AR9003 hardware family callbacks */
void ar9003_hw_attach_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	/* Family-specific init hooks */
	priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
	priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
	priv_ops->macversion_supported = ar9003_hw_macversion_supported;

	ops->config_pci_powersave = ar9003_hw_configpcipowersave;

	/* PHY, calibration and MAC callbacks are attached by their own
	 * ar9003_* modules. */
	ar9003_hw_attach_phy_ops(ah);
	ar9003_hw_attach_calib_ops(ah);
	ar9003_hw_attach_mac_ops(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 000000000000..a131cd10ef29
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9003_H
18#define INITVALS_9003_H
19
20/* AR9003 2.0 */
21
/* Radio postamble: mode-dependent register values; registered as the
 * radio ATH_INI_POST table (5 columns) by ar9003_hw_init_mode_regs(). */
static const u32 ar9300_2p0_radio_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
	{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
	{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
	{0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
};
31
/* Default TX gain table (lowest ob/db); installed by
 * ar9003_hw_init_mode_regs() and selected for TX gain index 0 in
 * ar9003_tx_gain_table_apply(). 5 columns: addr + one value per mode. */
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016048, 0x60001a61, 0x60001a61, 0x60001a61, 0x60001a61},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016448, 0x60001a61, 0x60001a61, 0x60001a61, 0x60001a61},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016848, 0x60001a61, 0x60001a61, 0x60001a61, 0x60001a61},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
109
/* Fast-clock modal settings; registered as iniModesAdditional
 * (3 columns: addr + 5G_HT20/5G_HT40) by ar9003_hw_init_mode_regs(). */
static const u32 ar9300Modes_fast_clock_2p0[][3] = {
	/* Addr      5G_HT20     5G_HT40   */
	{0x00001030, 0x00000268, 0x000004d0},
	{0x00001070, 0x0000018c, 0x00000318},
	{0x000010b0, 0x00000fd0, 0x00001fa0},
	{0x00008014, 0x044c044c, 0x08980898},
	{0x0000801c, 0x148ec02b, 0x148ec057},
	{0x00008318, 0x000044c0, 0x00008980},
	{0x00009e00, 0x03721821, 0x03721821},
	{0x0000a230, 0x0000000b, 0x00000016},
	{0x0000a254, 0x00000898, 0x00001130},
};
122
/* Radio core register values, identical for all modes (addr, value);
 * registered as the radio ATH_INI_CORE table (2 columns) by
 * ar9003_hw_init_mode_regs(). */
static const u32 ar9300_2p0_radio_core[][2] = {
	/* Addr      allmodes  */
	{0x00016000, 0x36db6db6},
	{0x00016004, 0x6db6db40},
	{0x00016008, 0x73f00000},
	{0x0001600c, 0x00000000},
	{0x00016040, 0x7f80fff8},
	{0x0001604c, 0x76d005b5},
	{0x00016050, 0x556cf031},
	{0x00016054, 0x43449440},
	{0x00016058, 0x0c51c92c},
	{0x0001605c, 0x3db7fffc},
	{0x00016060, 0xfffffffc},
	{0x00016064, 0x000f0278},
	{0x0001606c, 0x6db60000},
	{0x00016080, 0x00000000},
	{0x00016084, 0x0e48048c},
	{0x00016088, 0x54214514},
	{0x0001608c, 0x119f481e},
	{0x00016090, 0x24926490},
	{0x00016098, 0xd2888888},
	{0x000160a0, 0x0a108ffe},
	{0x000160a4, 0x812fc370},
	{0x000160a8, 0x423c8000},
	{0x000160b4, 0x92480080},
	{0x000160c0, 0x00adb6d0},
	{0x000160c4, 0x6db6db60},
	{0x000160c8, 0x6db6db6c},
	{0x000160cc, 0x01e6c000},
	{0x00016100, 0x3fffbe01},
	{0x00016104, 0xfff80000},
	{0x00016108, 0x00080010},
	{0x00016140, 0x10804008},
	{0x00016144, 0x02084080},
	{0x00016148, 0x00000000},
	{0x00016280, 0x058a0001},
	{0x00016284, 0x3d840208},
	{0x00016288, 0x01a20408},
	{0x0001628c, 0x00038c07},
	{0x00016290, 0x40000004},
	{0x00016294, 0x458aa14f},
	{0x00016380, 0x00000000},
	{0x00016384, 0x00000000},
	{0x00016388, 0x00800700},
	{0x0001638c, 0x00800700},
	{0x00016390, 0x00800700},
	{0x00016394, 0x00000000},
	{0x00016398, 0x00000000},
	{0x0001639c, 0x00000000},
	{0x000163a0, 0x00000001},
	{0x000163a4, 0x00000001},
	{0x000163a8, 0x00000000},
	{0x000163ac, 0x00000000},
	{0x000163b0, 0x00000000},
	{0x000163b4, 0x00000000},
	{0x000163b8, 0x00000000},
	{0x000163bc, 0x00000000},
	{0x000163c0, 0x000000a0},
	{0x000163c4, 0x000c0000},
	{0x000163c8, 0x14021402},
	{0x000163cc, 0x00001402},
	{0x000163d0, 0x00000000},
	{0x000163d4, 0x00000000},
	{0x00016400, 0x36db6db6},
	{0x00016404, 0x6db6db40},
	{0x00016408, 0x73f00000},
	{0x0001640c, 0x00000000},
	{0x00016440, 0x7f80fff8},
	{0x0001644c, 0x76d005b5},
	{0x00016450, 0x556cf031},
	{0x00016454, 0x43449440},
	{0x00016458, 0x0c51c92c},
	{0x0001645c, 0x3db7fffc},
	{0x00016460, 0xfffffffc},
	{0x00016464, 0x000f0278},
	{0x0001646c, 0x6db60000},
	{0x00016500, 0x3fffbe01},
	{0x00016504, 0xfff80000},
	{0x00016508, 0x00080010},
	{0x00016540, 0x10804008},
	{0x00016544, 0x02084080},
	{0x00016548, 0x00000000},
	{0x00016780, 0x00000000},
	{0x00016784, 0x00000000},
	{0x00016788, 0x00800700},
	{0x0001678c, 0x00800700},
	{0x00016790, 0x00800700},
	{0x00016794, 0x00000000},
	{0x00016798, 0x00000000},
	{0x0001679c, 0x00000000},
	{0x000167a0, 0x00000001},
	{0x000167a4, 0x00000001},
	{0x000167a8, 0x00000000},
	{0x000167ac, 0x00000000},
	{0x000167b0, 0x00000000},
	{0x000167b4, 0x00000000},
	{0x000167b8, 0x00000000},
	{0x000167bc, 0x00000000},
	{0x000167c0, 0x000000a0},
	{0x000167c4, 0x000c0000},
	{0x000167c8, 0x14021402},
	{0x000167cc, 0x00001402},
	{0x000167d0, 0x00000000},
	{0x000167d4, 0x00000000},
	{0x00016800, 0x36db6db6},
	{0x00016804, 0x6db6db40},
	{0x00016808, 0x73f00000},
	{0x0001680c, 0x00000000},
	{0x00016840, 0x7f80fff8},
	{0x0001684c, 0x76d005b5},
	{0x00016850, 0x556cf031},
	{0x00016854, 0x43449440},
	{0x00016858, 0x0c51c92c},
	{0x0001685c, 0x3db7fffc},
	{0x00016860, 0xfffffffc},
	{0x00016864, 0x000f0278},
	{0x0001686c, 0x6db60000},
	{0x00016900, 0x3fffbe01},
	{0x00016904, 0xfff80000},
	{0x00016908, 0x00080010},
	{0x00016940, 0x10804008},
	{0x00016944, 0x02084080},
	{0x00016948, 0x00000000},
	{0x00016b80, 0x00000000},
	{0x00016b84, 0x00000000},
	{0x00016b88, 0x00800700},
	{0x00016b8c, 0x00800700},
	{0x00016b90, 0x00800700},
	{0x00016b94, 0x00000000},
	{0x00016b98, 0x00000000},
	{0x00016b9c, 0x00000000},
	{0x00016ba0, 0x00000001},
	{0x00016ba4, 0x00000001},
	{0x00016ba8, 0x00000000},
	{0x00016bac, 0x00000000},
	{0x00016bb0, 0x00000000},
	{0x00016bb4, 0x00000000},
	{0x00016bb8, 0x00000000},
	{0x00016bbc, 0x00000000},
	{0x00016bc0, 0x000000a0},
	{0x00016bc4, 0x000c0000},
	{0x00016bc8, 0x14021402},
	{0x00016bcc, 0x00001402},
	{0x00016bd0, 0x00000000},
	{0x00016bd4, 0x00000000},
};
269
/* Merlin RX gain table (addr, value for all modes).
 * NOTE(review): not referenced by ar9003_hw.c in this patch —
 * presumably consumed elsewhere; confirm before removing. */
static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x02000101},
	{0x0000a004, 0x02000102},
	{0x0000a008, 0x02000103},
	{0x0000a00c, 0x02000104},
	{0x0000a010, 0x02000200},
	{0x0000a014, 0x02000201},
	{0x0000a018, 0x02000202},
	{0x0000a01c, 0x02000203},
	{0x0000a020, 0x02000204},
	{0x0000a024, 0x02000205},
	{0x0000a028, 0x02000208},
	{0x0000a02c, 0x02000302},
	{0x0000a030, 0x02000303},
	{0x0000a034, 0x02000304},
	{0x0000a038, 0x02000400},
	{0x0000a03c, 0x02010300},
	{0x0000a040, 0x02010301},
	{0x0000a044, 0x02010302},
	{0x0000a048, 0x02000500},
	{0x0000a04c, 0x02010400},
	{0x0000a050, 0x02020300},
	{0x0000a054, 0x02020301},
	{0x0000a058, 0x02020302},
	{0x0000a05c, 0x02020303},
	{0x0000a060, 0x02020400},
	{0x0000a064, 0x02030300},
	{0x0000a068, 0x02030301},
	{0x0000a06c, 0x02030302},
	{0x0000a070, 0x02030303},
	{0x0000a074, 0x02030400},
	{0x0000a078, 0x02040300},
	{0x0000a07c, 0x02040301},
	{0x0000a080, 0x02040302},
	{0x0000a084, 0x02040303},
	{0x0000a088, 0x02030500},
	{0x0000a08c, 0x02040400},
	{0x0000a090, 0x02050203},
	{0x0000a094, 0x02050204},
	{0x0000a098, 0x02050205},
	{0x0000a09c, 0x02040500},
	{0x0000a0a0, 0x02050301},
	{0x0000a0a4, 0x02050302},
	{0x0000a0a8, 0x02050303},
	{0x0000a0ac, 0x02050400},
	{0x0000a0b0, 0x02050401},
	{0x0000a0b4, 0x02050402},
	{0x0000a0b8, 0x02050403},
	{0x0000a0bc, 0x02050500},
	{0x0000a0c0, 0x02050501},
	{0x0000a0c4, 0x02050502},
	{0x0000a0c8, 0x02050503},
	{0x0000a0cc, 0x02050504},
	{0x0000a0d0, 0x02050600},
	{0x0000a0d4, 0x02050601},
	{0x0000a0d8, 0x02050602},
	{0x0000a0dc, 0x02050603},
	{0x0000a0e0, 0x02050604},
	{0x0000a0e4, 0x02050700},
	{0x0000a0e8, 0x02050701},
	{0x0000a0ec, 0x02050702},
	{0x0000a0f0, 0x02050703},
	{0x0000a0f4, 0x02050704},
	{0x0000a0f8, 0x02050705},
	{0x0000a0fc, 0x02050708},
	{0x0000a100, 0x02050709},
	{0x0000a104, 0x0205070a},
	{0x0000a108, 0x0205070b},
	{0x0000a10c, 0x0205070c},
	{0x0000a110, 0x0205070d},
	{0x0000a114, 0x02050710},
	{0x0000a118, 0x02050711},
	{0x0000a11c, 0x02050712},
	{0x0000a120, 0x02050713},
	{0x0000a124, 0x02050714},
	{0x0000a128, 0x02050715},
	{0x0000a12c, 0x02050730},
	{0x0000a130, 0x02050731},
	{0x0000a134, 0x02050732},
	{0x0000a138, 0x02050733},
	{0x0000a13c, 0x02050734},
	{0x0000a140, 0x02050735},
	{0x0000a144, 0x02050750},
	{0x0000a148, 0x02050751},
	{0x0000a14c, 0x02050752},
	{0x0000a150, 0x02050753},
	{0x0000a154, 0x02050754},
	{0x0000a158, 0x02050755},
	{0x0000a15c, 0x02050770},
	{0x0000a160, 0x02050771},
	{0x0000a164, 0x02050772},
	{0x0000a168, 0x02050773},
	{0x0000a16c, 0x02050774},
	{0x0000a170, 0x02050775},
	{0x0000a174, 0x00000776},
	{0x0000a178, 0x00000776},
	{0x0000a17c, 0x00000776},
	{0x0000a180, 0x00000776},
	{0x0000a184, 0x00000776},
	{0x0000a188, 0x00000776},
	{0x0000a18c, 0x00000776},
	{0x0000a190, 0x00000776},
	{0x0000a194, 0x00000776},
	{0x0000a198, 0x00000776},
	{0x0000a19c, 0x00000776},
	{0x0000a1a0, 0x00000776},
	{0x0000a1a4, 0x00000776},
	{0x0000a1a8, 0x00000776},
	{0x0000a1ac, 0x00000776},
	{0x0000a1b0, 0x00000776},
	{0x0000a1b4, 0x00000776},
	{0x0000a1b8, 0x00000776},
	{0x0000a1bc, 0x00000776},
	{0x0000a1c0, 0x00000776},
	{0x0000a1c4, 0x00000776},
	{0x0000a1c8, 0x00000776},
	{0x0000a1cc, 0x00000776},
	{0x0000a1d0, 0x00000776},
	{0x0000a1d4, 0x00000776},
	{0x0000a1d8, 0x00000776},
	{0x0000a1dc, 0x00000776},
	{0x0000a1e0, 0x00000776},
	{0x0000a1e4, 0x00000776},
	{0x0000a1e8, 0x00000776},
	{0x0000a1ec, 0x00000776},
	{0x0000a1f0, 0x00000776},
	{0x0000a1f4, 0x00000776},
	{0x0000a1f8, 0x00000776},
	{0x0000a1fc, 0x00000776},
	/* Second chain: 0xb000 block mirrors the 0xa000 block above */
	{0x0000b000, 0x02000101},
	{0x0000b004, 0x02000102},
	{0x0000b008, 0x02000103},
	{0x0000b00c, 0x02000104},
	{0x0000b010, 0x02000200},
	{0x0000b014, 0x02000201},
	{0x0000b018, 0x02000202},
	{0x0000b01c, 0x02000203},
	{0x0000b020, 0x02000204},
	{0x0000b024, 0x02000205},
	{0x0000b028, 0x02000208},
	{0x0000b02c, 0x02000302},
	{0x0000b030, 0x02000303},
	{0x0000b034, 0x02000304},
	{0x0000b038, 0x02000400},
	{0x0000b03c, 0x02010300},
	{0x0000b040, 0x02010301},
	{0x0000b044, 0x02010302},
	{0x0000b048, 0x02000500},
	{0x0000b04c, 0x02010400},
	{0x0000b050, 0x02020300},
	{0x0000b054, 0x02020301},
	{0x0000b058, 0x02020302},
	{0x0000b05c, 0x02020303},
	{0x0000b060, 0x02020400},
	{0x0000b064, 0x02030300},
	{0x0000b068, 0x02030301},
	{0x0000b06c, 0x02030302},
	{0x0000b070, 0x02030303},
	{0x0000b074, 0x02030400},
	{0x0000b078, 0x02040300},
	{0x0000b07c, 0x02040301},
	{0x0000b080, 0x02040302},
	{0x0000b084, 0x02040303},
	{0x0000b088, 0x02030500},
	{0x0000b08c, 0x02040400},
	{0x0000b090, 0x02050203},
	{0x0000b094, 0x02050204},
	{0x0000b098, 0x02050205},
	{0x0000b09c, 0x02040500},
	{0x0000b0a0, 0x02050301},
	{0x0000b0a4, 0x02050302},
	{0x0000b0a8, 0x02050303},
	{0x0000b0ac, 0x02050400},
	{0x0000b0b0, 0x02050401},
	{0x0000b0b4, 0x02050402},
	{0x0000b0b8, 0x02050403},
	{0x0000b0bc, 0x02050500},
	{0x0000b0c0, 0x02050501},
	{0x0000b0c4, 0x02050502},
	{0x0000b0c8, 0x02050503},
	{0x0000b0cc, 0x02050504},
	{0x0000b0d0, 0x02050600},
	{0x0000b0d4, 0x02050601},
	{0x0000b0d8, 0x02050602},
	{0x0000b0dc, 0x02050603},
	{0x0000b0e0, 0x02050604},
	{0x0000b0e4, 0x02050700},
	{0x0000b0e8, 0x02050701},
	{0x0000b0ec, 0x02050702},
	{0x0000b0f0, 0x02050703},
	{0x0000b0f4, 0x02050704},
	{0x0000b0f8, 0x02050705},
	{0x0000b0fc, 0x02050708},
	{0x0000b100, 0x02050709},
	{0x0000b104, 0x0205070a},
	{0x0000b108, 0x0205070b},
	{0x0000b10c, 0x0205070c},
	{0x0000b110, 0x0205070d},
	{0x0000b114, 0x02050710},
	{0x0000b118, 0x02050711},
	{0x0000b11c, 0x02050712},
	{0x0000b120, 0x02050713},
	{0x0000b124, 0x02050714},
	{0x0000b128, 0x02050715},
	{0x0000b12c, 0x02050730},
	{0x0000b130, 0x02050731},
	{0x0000b134, 0x02050732},
	{0x0000b138, 0x02050733},
	{0x0000b13c, 0x02050734},
	{0x0000b140, 0x02050735},
	{0x0000b144, 0x02050750},
	{0x0000b148, 0x02050751},
	{0x0000b14c, 0x02050752},
	{0x0000b150, 0x02050753},
	{0x0000b154, 0x02050754},
	{0x0000b158, 0x02050755},
	{0x0000b15c, 0x02050770},
	{0x0000b160, 0x02050771},
	{0x0000b164, 0x02050772},
	{0x0000b168, 0x02050773},
	{0x0000b16c, 0x02050774},
	{0x0000b170, 0x02050775},
	{0x0000b174, 0x00000776},
	{0x0000b178, 0x00000776},
	{0x0000b17c, 0x00000776},
	{0x0000b180, 0x00000776},
	{0x0000b184, 0x00000776},
	{0x0000b188, 0x00000776},
	{0x0000b18c, 0x00000776},
	{0x0000b190, 0x00000776},
	{0x0000b194, 0x00000776},
	{0x0000b198, 0x00000776},
	{0x0000b19c, 0x00000776},
	{0x0000b1a0, 0x00000776},
	{0x0000b1a4, 0x00000776},
	{0x0000b1a8, 0x00000776},
	{0x0000b1ac, 0x00000776},
	{0x0000b1b0, 0x00000776},
	{0x0000b1b4, 0x00000776},
	{0x0000b1b8, 0x00000776},
	{0x0000b1bc, 0x00000776},
	{0x0000b1c0, 0x00000776},
	{0x0000b1c4, 0x00000776},
	{0x0000b1c8, 0x00000776},
	{0x0000b1cc, 0x00000776},
	{0x0000b1d0, 0x00000776},
	{0x0000b1d4, 0x00000776},
	{0x0000b1d8, 0x00000776},
	{0x0000b1dc, 0x00000776},
	{0x0000b1e0, 0x00000776},
	{0x0000b1e4, 0x00000776},
	{0x0000b1e8, 0x00000776},
	{0x0000b1ec, 0x00000776},
	{0x0000b1f0, 0x00000776},
	{0x0000b1f4, 0x00000776},
	{0x0000b1f8, 0x00000776},
	{0x0000b1fc, 0x00000776},
};
529
/* MAC postamble: mode-dependent register values; registered as the
 * MAC ATH_INI_POST table (5 columns) by ar9003_hw_init_mode_regs(). */
static const u32 ar9300_2p0_mac_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
541
/* SoC postamble: a single mode-dependent register; registered as the
 * SoC ATH_INI_POST table (5 columns) by ar9003_hw_init_mode_regs(). */
static const u32 ar9300_2p0_soc_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00007010, 0x00000023, 0x00000023, 0x00000022, 0x00000022},
};
546
/* Merlin (AR92xx) radio core values (addr, value for all modes).
 * NOTE(review): not referenced by ar9003_hw.c in this patch —
 * presumably consumed elsewhere; confirm before removing. */
static const u32 ar9200_merlin_2p0_radio_core[][2] = {
	/* Addr      allmodes  */
	{0x00007800, 0x00040000},
	{0x00007804, 0xdb005012},
	{0x00007808, 0x04924914},
	{0x0000780c, 0x21084210},
	{0x00007810, 0x6d801300},
	{0x00007814, 0x0019beff},
	{0x00007818, 0x07e41000},
	{0x0000781c, 0x00392000},
	{0x00007820, 0x92592480},
	{0x00007824, 0x00040000},
	{0x00007828, 0xdb005012},
	{0x0000782c, 0x04924914},
	{0x00007830, 0x21084210},
	{0x00007834, 0x6d801300},
	{0x00007838, 0x0019beff},
	{0x0000783c, 0x07e40000},
	{0x00007840, 0x00392000},
	{0x00007844, 0x92592480},
	{0x00007848, 0x00100000},
	{0x0000784c, 0x773f0567},
	{0x00007850, 0x54214514},
	{0x00007854, 0x12035828},
	{0x00007858, 0x92592692},
	{0x0000785c, 0x00000000},
	{0x00007860, 0x56400000},
	{0x00007864, 0x0a8e370e},
	{0x00007868, 0xc0102850},
	{0x0000786c, 0x812d4000},
	{0x00007870, 0x807ec400},
	{0x00007874, 0x001b6db0},
	{0x00007878, 0x00376b63},
	{0x0000787c, 0x06db6db6},
	{0x00007880, 0x006d8000},
	{0x00007884, 0xffeffffe},
	{0x00007888, 0xffeffffe},
	{0x0000788c, 0x00010000},
	{0x00007890, 0x02060aeb},
	{0x00007894, 0x5a108000},
};
588
/* Baseband postamble: mode-dependent register values; registered as
 * the BB ATH_INI_POST table (5 columns) by ar9003_hw_init_mode_regs(). */
static const u32 ar9300_2p0_baseband_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
	{0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
	{0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
	{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
	{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
	{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
};
641
/*
 * Baseband core initialization values for AR9300 chips, revision 2.0.
 * Each entry is a {register address, value} pair; the single value column
 * is written for all operating modes ("allmodes").  These are
 * vendor-supplied calibration/initialization constants — do not edit
 * individual entries by hand.
 */
static const u32 ar9300_2p0_baseband_core[][2] = {
	/* Addr      allmodes  */
	{0x00009800, 0xafe68e30},
	{0x00009804, 0xfd14e000},
	{0x00009808, 0x9c0a9f6b},
	{0x0000980c, 0x04900000},
	{0x00009814, 0x9280c00a},
	{0x00009818, 0x00000000},
	{0x0000981c, 0x00020028},
	{0x00009834, 0x5f3ca3de},
	{0x00009838, 0x0108ecff},
	{0x0000983c, 0x14750600},
	{0x00009880, 0x201fff00},
	{0x00009884, 0x00001042},
	{0x000098a4, 0x00200400},
	{0x000098b0, 0x52440bbe},
	{0x000098d0, 0x004b6a8e},
	{0x000098d4, 0x00000820},
	{0x000098dc, 0x00000000},
	{0x000098f0, 0x00000000},
	{0x000098f4, 0x00000000},
	{0x00009c04, 0xff55ff55},
	{0x00009c08, 0x0320ff55},
	{0x00009c0c, 0x00000000},
	{0x00009c10, 0x00000000},
	{0x00009c14, 0x00046384},
	{0x00009c18, 0x05b6b440},
	{0x00009c1c, 0x00b6b440},
	{0x00009d00, 0xc080a333},
	{0x00009d04, 0x40206c10},
	{0x00009d08, 0x009c4060},
	{0x00009d0c, 0x9883800a},
	{0x00009d10, 0x01834061},
	{0x00009d14, 0x00c0040b},
	{0x00009d18, 0x00000000},
	{0x00009e08, 0x0038233c},
	{0x00009e24, 0x990bb515},
	{0x00009e28, 0x0c6f0000},
	{0x00009e30, 0x06336f77},
	{0x00009e34, 0x6af6532f},
	{0x00009e38, 0x0cc80c00},
	{0x00009e3c, 0xcf946222},
	{0x00009e40, 0x0d261820},
	{0x00009e4c, 0x00001004},
	{0x00009e50, 0x00ff03f1},
	{0x00009e54, 0x00000000},
	{0x00009fc0, 0x803e4788},
	{0x00009fc4, 0x0001efb5},
	{0x00009fcc, 0x40000014},
	{0x00009fd0, 0x01193b93},
	{0x0000a20c, 0x00000000},
	{0x0000a220, 0x00000000},
	{0x0000a224, 0x00000000},
	{0x0000a228, 0x10002310},
	{0x0000a22c, 0x01036a1e},
	{0x0000a234, 0x10000fff},
	{0x0000a23c, 0x00000000},
	{0x0000a244, 0x0c000000},
	{0x0000a2a0, 0x00000001},
	{0x0000a2c0, 0x00000001},
	{0x0000a2c8, 0x00000000},
	{0x0000a2cc, 0x18c43433},
	{0x0000a2d4, 0x00000000},
	{0x0000a2dc, 0x00000000},
	{0x0000a2e0, 0x00000000},
	{0x0000a2e4, 0x00000000},
	{0x0000a2e8, 0x00000000},
	{0x0000a2ec, 0x00000000},
	{0x0000a2f0, 0x00000000},
	{0x0000a2f4, 0x00000000},
	{0x0000a2f8, 0x00000000},
	{0x0000a344, 0x00000000},
	{0x0000a34c, 0x00000000},
	{0x0000a350, 0x0000a000},
	{0x0000a364, 0x00000000},
	{0x0000a370, 0x00000000},
	{0x0000a390, 0x00000001},
	{0x0000a394, 0x00000444},
	{0x0000a398, 0x001f0e0f},
	{0x0000a39c, 0x0075393f},
	{0x0000a3a0, 0xb79f6427},
	{0x0000a3a4, 0x00000000},
	{0x0000a3a8, 0xaaaaaaaa},
	{0x0000a3ac, 0x3c466478},
	{0x0000a3c0, 0x20202020},
	{0x0000a3c4, 0x22222220},
	{0x0000a3c8, 0x20200020},
	{0x0000a3cc, 0x20202020},
	{0x0000a3d0, 0x20202020},
	{0x0000a3d4, 0x20202020},
	{0x0000a3d8, 0x20202020},
	{0x0000a3dc, 0x20202020},
	{0x0000a3e0, 0x20202020},
	{0x0000a3e4, 0x20202020},
	{0x0000a3e8, 0x20202020},
	{0x0000a3ec, 0x20202020},
	{0x0000a3f0, 0x00000000},
	{0x0000a3f4, 0x00000246},
	{0x0000a3f8, 0x0cdbd380},
	{0x0000a3fc, 0x000f0f01},
	{0x0000a400, 0x8fa91f01},
	{0x0000a404, 0x00000000},
	{0x0000a408, 0x0e79e5c6},
	{0x0000a40c, 0x00820820},
	{0x0000a414, 0x1ce739ce},
	{0x0000a418, 0x7d001dce},
	{0x0000a41c, 0x1ce739ce},
	{0x0000a420, 0x000001ce},
	{0x0000a424, 0x1ce739ce},
	{0x0000a428, 0x000001ce},
	{0x0000a42c, 0x1ce739ce},
	{0x0000a430, 0x1ce739ce},
	{0x0000a434, 0x00000000},
	{0x0000a438, 0x00001801},
	{0x0000a43c, 0x00000000},
	{0x0000a440, 0x00000000},
	{0x0000a444, 0x00000000},
	{0x0000a448, 0x07000080},
	{0x0000a44c, 0x00000001},
	{0x0000a450, 0x00010000},
	{0x0000a458, 0x00000000},
	{0x0000a600, 0x00000000},
	{0x0000a604, 0x00000000},
	{0x0000a608, 0x00000000},
	{0x0000a60c, 0x00000000},
	{0x0000a610, 0x00000000},
	{0x0000a614, 0x00000000},
	{0x0000a618, 0x00000000},
	{0x0000a61c, 0x00000000},
	{0x0000a620, 0x00000000},
	{0x0000a624, 0x00000000},
	{0x0000a628, 0x00000000},
	{0x0000a62c, 0x00000000},
	{0x0000a630, 0x00000000},
	{0x0000a634, 0x00000000},
	{0x0000a638, 0x00000000},
	{0x0000a63c, 0x00000000},
	{0x0000a640, 0x00000000},
	{0x0000a644, 0x3ffd9d74},
	{0x0000a648, 0x0048060a},
	{0x0000a64c, 0x00000637},
	{0x0000a670, 0x03020100},
	{0x0000a674, 0x09080504},
	{0x0000a678, 0x0d0c0b0a},
	{0x0000a67c, 0x13121110},
	{0x0000a680, 0x31301514},
	{0x0000a684, 0x35343332},
	{0x0000a688, 0x00000036},
	{0x0000a690, 0x00000838},
	{0x0000a7c0, 0x00000000},
	{0x0000a7c4, 0xfffffffc},
	{0x0000a7c8, 0x00000000},
	{0x0000a7cc, 0x00000000},
	{0x0000a7d0, 0x00000000},
	{0x0000a7d4, 0x00000004},
	{0x0000a7dc, 0x00000001},
	/* NOTE: the 0x0000a8xx/0x0000b8xx and 0x0000b2xx/0x0000c2xx ranges
	 * below mirror earlier 0x000098xx/0x0000a2xx entries — presumably
	 * per-chain copies of the same baseband registers (not verifiable
	 * from this file alone). */
	{0x0000a8d0, 0x004b6a8e},
	{0x0000a8d4, 0x00000820},
	{0x0000a8dc, 0x00000000},
	{0x0000a8f0, 0x00000000},
	{0x0000a8f4, 0x00000000},
	{0x0000b2d0, 0x00000080},
	{0x0000b2d4, 0x00000000},
	{0x0000b2dc, 0x00000000},
	{0x0000b2e0, 0x00000000},
	{0x0000b2e4, 0x00000000},
	{0x0000b2e8, 0x00000000},
	{0x0000b2ec, 0x00000000},
	{0x0000b2f0, 0x00000000},
	{0x0000b2f4, 0x00000000},
	{0x0000b2f8, 0x00000000},
	{0x0000b408, 0x0e79e5c0},
	{0x0000b40c, 0x00820820},
	{0x0000b420, 0x00000000},
	{0x0000b8d0, 0x004b6a8e},
	{0x0000b8d4, 0x00000820},
	{0x0000b8dc, 0x00000000},
	{0x0000b8f0, 0x00000000},
	{0x0000b8f4, 0x00000000},
	{0x0000c2d0, 0x00000080},
	{0x0000c2d4, 0x00000000},
	{0x0000c2dc, 0x00000000},
	{0x0000c2e0, 0x00000000},
	{0x0000c2e4, 0x00000000},
	{0x0000c2e8, 0x00000000},
	{0x0000c2ec, 0x00000000},
	{0x0000c2f0, 0x00000000},
	{0x0000c2f4, 0x00000000},
	{0x0000c2f8, 0x00000000},
	{0x0000c408, 0x0e79e5c0},
	{0x0000c40c, 0x00820820},
	{0x0000c420, 0x00000000},
};
835
836static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a410, 0x000050db, 0x000050db, 0x000050d9, 0x000050d9},
839 {0x0000a500, 0x00020220, 0x00020220, 0x00000000, 0x00000000},
840 {0x0000a504, 0x06020223, 0x06020223, 0x04000002, 0x04000002},
841 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
842 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
843 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
844 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
845 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
846 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
847 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
848 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
849 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
850 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
851 {0x0000a530, 0x34043643, 0x34043643, 0x2a000e20, 0x2a000e20},
852 {0x0000a534, 0x38043a44, 0x38043a44, 0x2e000e22, 0x2e000e22},
853 {0x0000a538, 0x3b043e45, 0x3b043e45, 0x31000e24, 0x31000e24},
854 {0x0000a53c, 0x40063e46, 0x40063e46, 0x34001640, 0x34001640},
855 {0x0000a540, 0x44083e46, 0x44083e46, 0x38001660, 0x38001660},
856 {0x0000a544, 0x46083e66, 0x46083e66, 0x3b001861, 0x3b001861},
857 {0x0000a548, 0x4b0a3e69, 0x4b0a3e69, 0x3e001a81, 0x3e001a81},
858 {0x0000a54c, 0x4f0a5e66, 0x4f0a5e66, 0x42001a83, 0x42001a83},
859 {0x0000a550, 0x540a7e66, 0x540a7e66, 0x44001c84, 0x44001c84},
860 {0x0000a554, 0x570a7e89, 0x570a7e89, 0x48001ce3, 0x48001ce3},
861 {0x0000a558, 0x5c0e7e8a, 0x5c0e7e8a, 0x4c001ce5, 0x4c001ce5},
862 {0x0000a55c, 0x60127e8b, 0x60127e8b, 0x50001ce9, 0x50001ce9},
863 {0x0000a560, 0x65127ecc, 0x65127ecc, 0x54001ceb, 0x54001ceb},
864 {0x0000a564, 0x6b169ecd, 0x6b169ecd, 0x56001eec, 0x56001eec},
865 {0x0000a568, 0x70169f0e, 0x70169f0e, 0x56001eec, 0x56001eec},
866 {0x0000a56c, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
867 {0x0000a570, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
868 {0x0000a574, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
869 {0x0000a578, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
870 {0x0000a57c, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
871 {0x0000a580, 0x00820220, 0x00820220, 0x00800000, 0x00800000},
872 {0x0000a584, 0x06820223, 0x06820223, 0x04800002, 0x04800002},
873 {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
874 {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
875 {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
876 {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
877 {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
878 {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
879 {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
880 {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
881 {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
882 {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
883 {0x0000a5b0, 0x34843643, 0x34843643, 0x2a800e20, 0x2a800e20},
884 {0x0000a5b4, 0x38843a44, 0x38843a44, 0x2e800e22, 0x2e800e22},
885 {0x0000a5b8, 0x3b843e45, 0x3b843e45, 0x31800e24, 0x31800e24},
886 {0x0000a5bc, 0x40863e46, 0x40863e46, 0x34801640, 0x34801640},
887 {0x0000a5c0, 0x4c8a3065, 0x44883e46, 0x44883e46, 0x38801660},
888 {0x0000a5c4, 0x46883e66, 0x46883e66, 0x3b801861, 0x3b801861},
889 {0x0000a5c8, 0x4b8a3e69, 0x4b8a3e69, 0x3e801a81, 0x3e801a81},
890 {0x0000a5cc, 0x4f8a5e66, 0x4f8a5e66, 0x42801a83, 0x42801a83},
891 {0x0000a5d0, 0x548a7e66, 0x548a7e66, 0x44801c84, 0x44801c84},
892 {0x0000a5d4, 0x578a7e89, 0x578a7e89, 0x48801ce3, 0x48801ce3},
893 {0x0000a5d8, 0x5c8e7e8a, 0x5c8e7e8a, 0x4c801ce5, 0x4c801ce5},
894 {0x0000a5dc, 0x60927e8b, 0x60927e8b, 0x50801ce9, 0x50801ce9},
895 {0x0000a5e0, 0x65927ecc, 0x65927ecc, 0x54801ceb, 0x54801ceb},
896 {0x0000a5e4, 0x6b969ecd, 0x6b969ecd, 0x56801eec, 0x56801eec},
897 {0x0000a5e8, 0x70969f0e, 0x70969f0e, 0x56801eec, 0x56801eec},
898 {0x0000a5ec, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
899 {0x0000a5f0, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
900 {0x0000a5f4, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
901 {0x0000a5f8, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
902 {0x0000a5fc, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
904 {0x00016048, 0xad241a61, 0xad241a61, 0xad241a61, 0xad241a61},
905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
906 {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
907 {0x00016448, 0xad241a61, 0xad241a61, 0xad241a61, 0xad241a61},
908 {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
909 {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
910 {0x00016848, 0xad241a61, 0xad241a61, 0xad241a61, 0xad241a61},
911 {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
912};
913
/*
 * High OB/DB TX gain table for AR9300 rev 2.0.
 * Columns: register address, then the value for 5G_HT20, 5G_HT40,
 * 2G_HT40 and 2G_HT20 modes.  Vendor-provided calibration data;
 * do not edit individual entries by hand.
 */
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a410, 0x000050db, 0x000050db, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00020220, 0x00020220, 0x00000000, 0x00000000},
	{0x0000a504, 0x06020223, 0x06020223, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
	{0x0000a530, 0x34043643, 0x34043643, 0x2a000e20, 0x2a000e20},
	{0x0000a534, 0x38043a44, 0x38043a44, 0x2e000e22, 0x2e000e22},
	{0x0000a538, 0x3b043e45, 0x3b043e45, 0x31000e24, 0x31000e24},
	{0x0000a53c, 0x40063e46, 0x40063e46, 0x34001640, 0x34001640},
	{0x0000a540, 0x44083e46, 0x44083e46, 0x38001660, 0x38001660},
	{0x0000a544, 0x46083e66, 0x46083e66, 0x3b001861, 0x3b001861},
	{0x0000a548, 0x4b0a3e69, 0x4b0a3e69, 0x3e001a81, 0x3e001a81},
	{0x0000a54c, 0x4f0a5e66, 0x4f0a5e66, 0x42001a83, 0x42001a83},
	{0x0000a550, 0x540a7e66, 0x540a7e66, 0x44001c84, 0x44001c84},
	{0x0000a554, 0x570a7e89, 0x570a7e89, 0x48001ce3, 0x48001ce3},
	{0x0000a558, 0x5c0e7e8a, 0x5c0e7e8a, 0x4c001ce5, 0x4c001ce5},
	{0x0000a55c, 0x60127e8b, 0x60127e8b, 0x50001ce9, 0x50001ce9},
	{0x0000a560, 0x65127ecc, 0x65127ecc, 0x54001ceb, 0x54001ceb},
	{0x0000a564, 0x6b169ecd, 0x6b169ecd, 0x56001eec, 0x56001eec},
	{0x0000a568, 0x70169f0e, 0x70169f0e, 0x56001eec, 0x56001eec},
	{0x0000a56c, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
	{0x0000a570, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
	{0x0000a574, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
	{0x0000a578, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
	{0x0000a57c, 0x75169f4f, 0x75169f4f, 0x56001eec, 0x56001eec},
	{0x0000a580, 0x00820220, 0x00820220, 0x00800000, 0x00800000},
	{0x0000a584, 0x06820223, 0x06820223, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
	{0x0000a5b0, 0x34843643, 0x34843643, 0x2a800e20, 0x2a800e20},
	{0x0000a5b4, 0x38843a44, 0x38843a44, 0x2e800e22, 0x2e800e22},
	{0x0000a5b8, 0x3b843e45, 0x3b843e45, 0x31800e24, 0x31800e24},
	{0x0000a5bc, 0x40863e46, 0x40863e46, 0x34801640, 0x34801640},
	{0x0000a5c0, 0x44883e46, 0x44883e46, 0x38801660, 0x38801660},
	{0x0000a5c4, 0x46883e66, 0x46883e66, 0x3b801861, 0x3b801861},
	{0x0000a5c8, 0x4b8a3e69, 0x4b8a3e69, 0x3e801a81, 0x3e801a81},
	{0x0000a5cc, 0x4f8a5e66, 0x4f8a5e66, 0x42801a83, 0x42801a83},
	{0x0000a5d0, 0x548a7e66, 0x548a7e66, 0x44801c84, 0x44801c84},
	{0x0000a5d4, 0x578a7e89, 0x578a7e89, 0x48801ce3, 0x48801ce3},
	{0x0000a5d8, 0x5c8e7e8a, 0x5c8e7e8a, 0x4c801ce5, 0x4c801ce5},
	{0x0000a5dc, 0x60927e8b, 0x60927e8b, 0x50801ce9, 0x50801ce9},
	{0x0000a5e0, 0x65927ecc, 0x65927ecc, 0x54801ceb, 0x54801ceb},
	{0x0000a5e4, 0x6b969ecd, 0x6b969ecd, 0x56801eec, 0x56801eec},
	{0x0000a5e8, 0x70969f0e, 0x70969f0e, 0x56801eec, 0x56801eec},
	{0x0000a5ec, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
	{0x0000a5f0, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
	{0x0000a5f4, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
	{0x0000a5f8, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
	{0x0000a5fc, 0x75969f4f, 0x75969f4f, 0x56801eec, 0x56801eec},
	/* RF front-end (0x16xxx) settings specific to the high-OB/DB profile */
	{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016048, 0x8c001a61, 0x8c001a61, 0x8c001a61, 0x8c001a61},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016448, 0x8c001a61, 0x8c001a61, 0x8c001a61, 0x8c001a61},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016848, 0x8c001a61, 0x8c001a61, 0x8c001a61, 0x8c001a61},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
991
/*
 * Common (default) RX gain table for AR9300 rev 2.0.
 * Each entry is a {register address, value} pair applied in all modes.
 * Vendor-provided calibration data; do not edit individual entries
 * by hand.
 */
static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x01800082},
	{0x0000a014, 0x01820181},
	{0x0000a018, 0x01840183},
	{0x0000a01c, 0x01880185},
	{0x0000a020, 0x018a0189},
	{0x0000a024, 0x02850284},
	{0x0000a028, 0x02890288},
	{0x0000a02c, 0x028b028a},
	{0x0000a030, 0x028d028c},
	{0x0000a034, 0x02910290},
	{0x0000a038, 0x02930292},
	{0x0000a03c, 0x03910390},
	{0x0000a040, 0x03930392},
	{0x0000a044, 0x03950394},
	{0x0000a048, 0x00000396},
	{0x0000a04c, 0x00000000},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x28282828},
	{0x0000a084, 0x21212128},
	{0x0000a088, 0x21212121},
	{0x0000a08c, 0x1c1c1c21},
	{0x0000a090, 0x1c1c1c1c},
	{0x0000a094, 0x17171c1c},
	{0x0000a098, 0x02020212},
	{0x0000a09c, 0x02020202},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x011f0100},
	{0x0000a0c8, 0x011d011e},
	{0x0000a0cc, 0x011b011c},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x021d021e},
	{0x0000a0e0, 0x03010302},
	{0x0000a0e4, 0x031f0300},
	{0x0000a0e8, 0x0402031e},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x05010502},
	{0x0000a0f8, 0x051f0500},
	{0x0000a0fc, 0x0602051e},
	{0x0000a100, 0x06000601},
	{0x0000a104, 0x061e061f},
	{0x0000a108, 0x0703061d},
	{0x0000a10c, 0x07010702},
	{0x0000a110, 0x00000700},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	/* 0x0000a140-0x0000a190 repeat the 0x0000a0c0-0x0000a110 values */
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x011f0100},
	{0x0000a148, 0x011d011e},
	{0x0000a14c, 0x011b011c},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x021d021e},
	{0x0000a160, 0x03010302},
	{0x0000a164, 0x031f0300},
	{0x0000a168, 0x0402031e},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x05010502},
	{0x0000a178, 0x051f0500},
	{0x0000a17c, 0x0602051e},
	{0x0000a180, 0x06000601},
	{0x0000a184, 0x061e061f},
	{0x0000a188, 0x0703061d},
	{0x0000a18c, 0x07010702},
	{0x0000a190, 0x00000700},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	/* 0x0000bxxx range: presumably the second-chain copies of the
	 * 0x0000axxx RX gain registers — values differ from chain 0. */
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	/* 0x0000b140-0x0000b1a4 repeat the 0x0000b0c0-0x0000b124 values */
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1251
/*
 * Low OB/DB TX gain table for AR9300 rev 2.0.
 * Columns: register address, then the value for 5G_HT20, 5G_HT40,
 * 2G_HT40 and 2G_HT20 modes.  Vendor-provided calibration data;
 * do not edit individual entries by hand.
 */
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	/* RF front-end (0x16xxx) settings specific to the low-OB/DB profile */
	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016048, 0x64001a61, 0x64001a61, 0x64001a61, 0x64001a61},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016448, 0x64001a61, 0x64001a61, 0x64001a61, 0x64001a61},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016848, 0x64001a61, 0x64001a61, 0x64001a61, 0x64001a61},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
1329
/*
 * MAC core initialization values for AR9300 rev 2.0.
 * Each entry is a {register address, value} pair applied in all modes.
 * Vendor-provided initialization data; do not edit individual entries
 * by hand.
 */
static const u32 ar9300_2p0_mac_core[][2] = {
	/* Addr      allmodes  */
	{0x00000008, 0x00000000},
	{0x00000030, 0x00020085},
	{0x00000034, 0x00000005},
	{0x00000040, 0x00000000},
	{0x00000044, 0x00000000},
	{0x00000048, 0x00000008},
	{0x0000004c, 0x00000010},
	{0x00000050, 0x00000000},
	{0x00001040, 0x002ffc0f},
	{0x00001044, 0x002ffc0f},
	{0x00001048, 0x002ffc0f},
	{0x0000104c, 0x002ffc0f},
	{0x00001050, 0x002ffc0f},
	{0x00001054, 0x002ffc0f},
	{0x00001058, 0x002ffc0f},
	{0x0000105c, 0x002ffc0f},
	{0x00001060, 0x002ffc0f},
	{0x00001064, 0x002ffc0f},
	{0x000010f0, 0x00000100},
	{0x00001270, 0x00000000},
	{0x000012b0, 0x00000000},
	{0x000012f0, 0x00000000},
	{0x0000143c, 0x00000000},
	{0x0000147c, 0x00000000},
	{0x00008000, 0x00000000},
	{0x00008004, 0x00000000},
	{0x00008008, 0x00000000},
	{0x0000800c, 0x00000000},
	{0x00008018, 0x00000000},
	{0x00008020, 0x00000000},
	{0x00008038, 0x00000000},
	{0x0000803c, 0x00000000},
	{0x00008040, 0x00000000},
	{0x00008044, 0x00000000},
	{0x00008048, 0x00000000},
	{0x0000804c, 0xffffffff},
	{0x00008054, 0x00000000},
	{0x00008058, 0x00000000},
	{0x0000805c, 0x000fc78f},
	{0x00008060, 0x0000000f},
	{0x00008064, 0x00000000},
	{0x00008070, 0x00000310},
	{0x00008074, 0x00000020},
	{0x00008078, 0x00000000},
	{0x0000809c, 0x0000000f},
	{0x000080a0, 0x00000000},
	{0x000080a4, 0x02ff0000},
	{0x000080a8, 0x0e070605},
	{0x000080ac, 0x0000000d},
	{0x000080b0, 0x00000000},
	{0x000080b4, 0x00000000},
	{0x000080b8, 0x00000000},
	{0x000080bc, 0x00000000},
	{0x000080c0, 0x2a800000},
	{0x000080c4, 0x06900168},
	{0x000080c8, 0x13881c20},
	{0x000080cc, 0x01f40000},
	{0x000080d0, 0x00252500},
	{0x000080d4, 0x00a00000},
	{0x000080d8, 0x00400000},
	{0x000080dc, 0x00000000},
	{0x000080e0, 0xffffffff},
	{0x000080e4, 0x0000ffff},
	{0x000080e8, 0x3f3f3f3f},
	{0x000080ec, 0x00000000},
	{0x000080f0, 0x00000000},
	{0x000080f4, 0x00000000},
	{0x000080fc, 0x00020000},
	{0x00008100, 0x00000000},
	{0x00008108, 0x00000052},
	{0x0000810c, 0x00000000},
	{0x00008110, 0x00000000},
	{0x00008114, 0x000007ff},
	{0x00008118, 0x000000aa},
	{0x0000811c, 0x00003210},
	{0x00008124, 0x00000000},
	{0x00008128, 0x00000000},
	{0x0000812c, 0x00000000},
	{0x00008130, 0x00000000},
	{0x00008134, 0x00000000},
	{0x00008138, 0x00000000},
	{0x0000813c, 0x0000ffff},
	{0x00008144, 0xffffffff},
	{0x00008168, 0x00000000},
	{0x0000816c, 0x00000000},
	{0x00008170, 0x18486200},
	{0x00008174, 0x33332210},
	{0x00008178, 0x00000000},
	{0x0000817c, 0x00020000},
	{0x000081c0, 0x00000000},
	{0x000081c4, 0x33332210},
	{0x000081c8, 0x00000000},
	{0x000081cc, 0x00000000},
	{0x000081d4, 0x00000000},
	{0x000081ec, 0x00000000},
	{0x000081f0, 0x00000000},
	{0x000081f4, 0x00000000},
	{0x000081f8, 0x00000000},
	{0x000081fc, 0x00000000},
	{0x00008240, 0x00100000},
	{0x00008244, 0x0010f424},
	{0x00008248, 0x00000800},
	{0x0000824c, 0x0001e848},
	{0x00008250, 0x00000000},
	{0x00008254, 0x00000000},
	{0x00008258, 0x00000000},
	{0x0000825c, 0x40000000},
	{0x00008260, 0x00080922},
	{0x00008264, 0x98a00010},
	{0x00008268, 0xffffffff},
	{0x0000826c, 0x0000ffff},
	{0x00008270, 0x00000000},
	{0x00008274, 0x40000000},
	{0x00008278, 0x003e4180},
	{0x0000827c, 0x00000004},
	{0x00008284, 0x0000002c},
	{0x00008288, 0x0000002c},
	{0x0000828c, 0x000000ff},
	{0x00008294, 0x00000000},
	{0x00008298, 0x00000000},
	{0x0000829c, 0x00000000},
	{0x00008300, 0x00000140},
	{0x00008314, 0x00000000},
	{0x0000831c, 0x0000010d},
	{0x00008328, 0x00000000},
	{0x0000832c, 0x00000007},
	{0x00008330, 0x00000302},
	{0x00008334, 0x00000700},
	{0x00008338, 0x00ff0000},
	{0x0000833c, 0x02400000},
	{0x00008340, 0x000107ff},
	{0x00008344, 0xaa48105b},
	{0x00008348, 0x008f0000},
	{0x0000835c, 0x00000000},
	{0x00008360, 0xffffffff},
	{0x00008364, 0xffffffff},
	{0x00008368, 0x00000000},
	{0x00008370, 0x00000000},
	{0x00008374, 0x000000ff},
	{0x00008378, 0x00000000},
	{0x0000837c, 0x00000000},
	{0x00008380, 0xffffffff},
	{0x00008384, 0xffffffff},
	{0x00008390, 0xffffffff},
	{0x00008394, 0xffffffff},
	{0x00008398, 0x00000000},
	{0x0000839c, 0x00000000},
	{0x000083a0, 0x00000000},
	{0x000083a4, 0x0000fa14},
	{0x000083a8, 0x000f0c00},
	{0x000083ac, 0x33332210},
	{0x000083b0, 0x33332210},
	{0x000083b4, 0x33332210},
	{0x000083b8, 0x33332210},
	{0x000083bc, 0x00000000},
	{0x000083c0, 0x00000000},
	{0x000083c4, 0x00000000},
	{0x000083c8, 0x00000000},
	{0x000083cc, 0x00000200},
	{0x000083d0, 0x000301ff},
};
1493
/*
 * RX gain initvals for AR9300 2.0 when no external LNA is fitted.
 * Each row is a {register address, value} pair written verbatim to the
 * baseband during initialization; the same value is used in all modes.
 * Do not reorder or edit by hand — this table is generated from the
 * hardware team's register dumps.
 */
static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
	/* Addr allmodes */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x01800082},
	{0x0000a014, 0x01820181},
	{0x0000a018, 0x01840183},
	{0x0000a01c, 0x01880185},
	{0x0000a020, 0x018a0189},
	{0x0000a024, 0x02850284},
	{0x0000a028, 0x02890288},
	{0x0000a02c, 0x03850384},
	{0x0000a030, 0x03890388},
	{0x0000a034, 0x038b038a},
	{0x0000a038, 0x038d038c},
	{0x0000a03c, 0x03910390},
	{0x0000a040, 0x03930392},
	{0x0000a044, 0x03950394},
	{0x0000a048, 0x00000396},
	{0x0000a04c, 0x00000000},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x28282828},
	{0x0000a084, 0x28282828},
	{0x0000a088, 0x28282828},
	{0x0000a08c, 0x28282828},
	{0x0000a090, 0x28282828},
	{0x0000a094, 0x21212128},
	{0x0000a098, 0x171c1c1c},
	{0x0000a09c, 0x02020212},
	{0x0000a0a0, 0x00000202},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x011f0100},
	{0x0000a0c8, 0x011d011e},
	{0x0000a0cc, 0x011b011c},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x021d021e},
	{0x0000a0e0, 0x03010302},
	{0x0000a0e4, 0x031f0300},
	{0x0000a0e8, 0x0402031e},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x05010502},
	{0x0000a0f8, 0x051f0500},
	{0x0000a0fc, 0x0602051e},
	{0x0000a100, 0x06000601},
	{0x0000a104, 0x061e061f},
	{0x0000a108, 0x0703061d},
	{0x0000a10c, 0x07010702},
	{0x0000a110, 0x00000700},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x011f0100},
	{0x0000a148, 0x011d011e},
	{0x0000a14c, 0x011b011c},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x021d021e},
	{0x0000a160, 0x03010302},
	{0x0000a164, 0x031f0300},
	{0x0000a168, 0x0402031e},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x05010502},
	{0x0000a178, 0x051f0500},
	{0x0000a17c, 0x0602051e},
	{0x0000a180, 0x06000601},
	{0x0000a184, 0x061e061f},
	{0x0000a188, 0x0703061d},
	{0x0000a18c, 0x07010702},
	{0x0000a190, 0x00000700},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000296},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1753
/*
 * SoC register preamble for AR9300 2.0: {register address, value} pairs
 * applied during chip initialization, identical across all modes.
 */
static const u32 ar9300_2p0_soc_preamble[][2] = {
	/* Addr allmodes */
	{0x000040a4, 0x00a0c1c9},
	{0x00007008, 0x00000000},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
};
1762
/*
 * PCIe PHY serdes values: PLL kept on, CLKREQ disabled in L1.
 * NOTE(review): 0x00004040 appears twice — presumably an ordered
 * two-write serdes programming sequence; keep the order as-is.
 */
static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x08212e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1769
/*
 * PCIe PHY serdes values with CLKREQ enabled in L1.
 * Differs from the other serdes tables only in the first 0x4040 word.
 */
static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x08253e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1776
/*
 * PCIe PHY serdes values with CLKREQ disabled in L1.
 * Differs from the other serdes tables only in the first 0x4040 word.
 */
static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x08213e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1783
1784#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 000000000000..37ba37481a47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include "hw.h"
17#include "ar9003_mac.h"
18
/* Enable reception by clearing all bits in the MAC control register. */
static void ar9003_hw_rx_enable(struct ath_hw *hw)
{
	REG_WRITE(hw, AR_CR, 0);
}
23
24static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
25{
26 int checksum;
27
28 checksum = ads->info + ads->link
29 + ads->data0 + ads->ctl3
30 + ads->data1 + ads->ctl5
31 + ads->data2 + ads->ctl7
32 + ads->data3 + ads->ctl9;
33
34 return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
35}
36
/*
 * Point a TX descriptor at the next descriptor in the chain and refresh
 * the pointer checksum in ctl10, which covers the link word just changed.
 */
static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
{
	struct ar9003_txc *ads = ds;

	ads->link = ds_link;
	ads->ctl10 &= ~AR_TxPtrChkSum;
	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
45
46static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
47{
48 struct ar9003_txc *ads = ds;
49
50 *ds_link = &ads->link;
51}
52
/*
 * Read and decode the interrupt status registers into the driver's
 * ATH9K_INT_* mask.  Returns false when no interrupt is pending,
 * true otherwise with *masked filled in.
 *
 * When the RAC (read-and-clear) capability is present, reading the
 * shadow registers clears them in hardware; otherwise the handled
 * bits are acknowledged with explicit writes below.
 */
static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;
	struct ath_common *common = ath9k_hw_common(ah);

	/* Only read AR_ISR while the MAC is awake and asserting an IRQ. */
	if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
		if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
				== AR_RTC_STATUS_ON)
			isr = REG_READ(ah, AR_ISR);
	}

	sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;

	*masked = 0;

	if (!isr && !sync_cause)
		return false;

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);

			/* Remap secondary-status bits into the shared mask
			 * layout via the MAP_ISR_S2_* shift constants. */
			mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
					MAP_ISR_S2_TIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
					MAP_ISR_S2_DTIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
					MAP_ISR_S2_DTIMSYNC);
			mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
					MAP_ISR_S2_CABEND);
			mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
					MAP_ISR_S2_GTT);
			mask2 |= ((isr2 & AR_ISR_S2_CST) <<
					MAP_ISR_S2_CST);
			mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
					MAP_ISR_S2_TSFOOR);

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				/* No RAC: ack AR_ISR_S2 manually and keep the
				 * BCNMISC bit from being acked via AR_ISR. */
				REG_WRITE(ah, AR_ISR_S2, isr2);
				isr &= ~AR_ISR_BCNMISC;
			}
		}

		if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
			isr = REG_READ(ah, AR_ISR_RAC);

		/* All-ones read means the device is gone (e.g. removed). */
		if (isr == 0xffffffff) {
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation)
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RXLP;

		if (ah->config.tx_intr_mitigation)
			if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
				*masked |= ATH9K_INT_TX;

		if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RXLP;

		if (isr & AR_ISR_HP_RXOK)
			*masked |= ATH9K_INT_RXHP;

		if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
			*masked |= ATH9K_INT_TX;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				u32 s0, s1;
				/* Read-then-write acks the TX status bits. */
				s0 = REG_READ(ah, AR_ISR_S0);
				REG_WRITE(ah, AR_ISR_S0, s0);
				s1 = REG_READ(ah, AR_ISR_S1);
				REG_WRITE(ah, AR_ISR_S1, s1);

				isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
						AR_ISR_TXEOL);
			}
		}

		if (isr & AR_ISR_GENTMR) {
			u32 s5;

			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
				s5 = REG_READ(ah, AR_ISR_S5_S);
			else
				s5 = REG_READ(ah, AR_ISR_S5);

			ah->intr_gen_timer_trigger =
				MS(s5, AR_ISR_S5_GENTIMER_TRIG);

			ah->intr_gen_timer_thresh =
				MS(s5, AR_ISR_S5_GENTIMER_THRESH);

			if (ah->intr_gen_timer_trigger)
				*masked |= ATH9K_INT_GENTIMER;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S5, s5);
				isr &= ~AR_ISR_GENTMR;
			}

		}

		*masked |= mask2;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
			REG_WRITE(ah, AR_ISR, isr);

			/* Flush the posted write before returning. */
			(void) REG_READ(ah, AR_ISR);
		}
	}

	if (sync_cause) {
		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			/* Pulse a host-interface reset and report fatal. */
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
			ath_print(common, ATH_DBG_INTERRUPT,
					"AR_INTR_SYNC_LOCAL_TIMEOUT\n");

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);

	}
	return true;
}
189
/*
 * Initialize one TX control descriptor for a frame segment.
 *
 * @seglen:      length of this segment's buffer
 * @is_firstseg: this descriptor carries the first buffer of the frame
 * @is_lastseg:  this descriptor carries the last buffer of the frame
 * @ds0:         first descriptor of the frame (source of ctl13/ctl14
 *               for the final segment)
 * @buf_addr:    DMA address of the segment's data buffer
 * @qcu:         destination transmit queue number
 */
static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
		bool is_firstseg, bool is_lastseg,
		const void *ds0, dma_addr_t buf_addr,
		unsigned int qcu)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int descid = 0;

	/* Tag the descriptor: vendor id, TX direction, control type, QCU.
	 * NOTE(review): trailing 0x17 is an opaque hardware constant from
	 * the vendor HAL — confirm against the AR9003 descriptor spec. */
	ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
		(1 << AR_TxRxDesc_S) |
		(1 << AR_CtrlStat_S) |
		(qcu << AR_TxQcuNum_S) | 0x17;

	/* Only the first buffer pointer is used per segment descriptor. */
	ads->data0 = buf_addr;
	ads->data1 = 0;
	ads->data2 = 0;
	ads->data3 = 0;

	ads->ctl3 = (seglen << AR_BufLen_S);
	ads->ctl3 &= AR_BufLen;

	/* Fill in pointer checksum and descriptor id */
	ads->ctl10 = ar9003_calc_ptr_chksum(ads);
	ads->ctl10 |= (descid << AR_TxDescId_S);

	if (is_firstseg) {
		/* More-data flag only if further segments follow. */
		ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
	} else if (is_lastseg) {
		/* Last segment inherits rate control words from the first. */
		ads->ctl11 = 0;
		ads->ctl12 = 0;
		ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
		ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
	} else {
		/* XXX Intermediate descriptor in a multi-descriptor frame.*/
		ads->ctl11 = 0;
		ads->ctl12 = AR_TxMore;
		ads->ctl13 = 0;
		ads->ctl14 = 0;
	}
}
230
/*
 * Pop the next completed TX status from the EDMA status ring and decode
 * it into *ts.
 *
 * Returns -EINPROGRESS when the tail entry is not yet done, -EIO on a
 * malformed status descriptor (the entry is consumed and zeroed), and
 * 0 on success.  Consumed entries are zeroed so stale AR_TxDone bits
 * are never re-read.
 */
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
		struct ath_tx_status *ts)
{
	struct ar9003_txs *ads;

	ads = &ah->ts_ring[ah->ts_tail];

	if ((ads->status8 & AR_TxDone) == 0)
		return -EINPROGRESS;

	/* Advance the tail; the ring wraps at ts_size. */
	ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;

	/* Sanity check: vendor id and TX direction must match what
	 * ar9003_hw_fill_txdesc() wrote into the control descriptor. */
	if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
			(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
		ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
				"Tx Descriptor error %x\n", ads->ds_info);
		memset(ads, 0, sizeof(*ads));
		return -EIO;
	}

	ts->qid = MS(ads->ds_info, AR_TxQcuNum);
	ts->desc_id = MS(ads->status1, AR_TxDescId);
	ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
	ts->ts_tstamp = ads->status4;
	ts->ts_status = 0;
	ts->ts_flags = 0;

	/* Map hardware error bits onto driver-level status flags. */
	if (ads->status3 & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->status3 & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (ads->status3 & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		/* Underrun: raise the TX trigger level to compensate. */
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status8 & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->status3 & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->status3 & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->status3 & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status3 & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status2 & AR_TxBaStatus) {
		/* Block-ack received: capture the BA bitmap words. */
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->status5;
		ts->ba_high = ads->status6;
	}

	ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);

	ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
	ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
	ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
	ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
	ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
	ts->ts_antenna = 0;

	ts->tid = MS(ads->status8, AR_TxTid);

	/* Clear the consumed ring entry for reuse. */
	memset(ads, 0, sizeof(*ads));

	return 0;
}
307
/*
 * Fill the per-frame 11n control words of a TX descriptor: frame length,
 * TX power, key index/type, frame type, and assorted ATH9K_TXDESC_* flag
 * bits mapped onto their hardware counterparts.
 */
static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
		u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
		u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	/* Clamp to the regulatory/config limit, then apply the chip's
	 * power index offset; 63 is the maximum encodable value. */
	if (txpower > ah->txpower_limit)
		txpower = ah->txpower_limit;

	txpower += ah->txpower_indexoffset;
	if (txpower > 63)
		txpower = 63;

	ads->ctl11 = (pktlen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txpower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
		| (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);

	ads->ctl12 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ctl17 = SM(keyType, AR_EncrType) |
		(flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
	ads->ctl18 = 0;
	ads->ctl19 = AR_Not_Sounding;

	ads->ctl20 = 0;
	ads->ctl21 = 0;
	ads->ctl22 = 0;
}
345
/*
 * Program the multi-rate retry scenario (up to 4 rate series) into the
 * first descriptor of a frame, and mirror the tries/rates words into the
 * frame's last descriptor so hardware sees a consistent pair.
 */
static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
		void *lastds,
		u32 durUpdateEn, u32 rtsctsRate,
		u32 rtsctsDuration,
		struct ath9k_11n_rate_series series[],
		u32 nseries, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
	u_int32_t ctl11;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ctl11 = ads->ctl11;

		/* RTS and CTS-to-self are mutually exclusive in ctl11. */
		if (flags & ATH9K_TXDESC_RTSENA) {
			ctl11 &= ~AR_CTSEnable;
			ctl11 |= AR_RTSEnable;
		} else {
			ctl11 &= ~AR_RTSEnable;
			ctl11 |= AR_CTSEnable;
		}

		ads->ctl11 = ctl11;
	} else {
		ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	/* Per-series try counts plus duration-update enable. */
	ads->ctl13 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ctl14 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ctl15 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ctl16 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ctl18 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	ads->ctl19 = AR_Not_Sounding;

	/* Keep first and last descriptor rate words in sync. */
	last_ads->ctl13 = ads->ctl13;
	last_ads->ctl14 = ads->ctl14;
}
401
402static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
403 u32 aggrLen)
404{
405 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
406
407 ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
408
409 ads->ctl17 &= ~AR_AggrLen;
410 ads->ctl17 |= SM(aggrLen, AR_AggrLen);
411}
412
/*
 * Mark a descriptor as a middle subframe of an A-MPDU and set its
 * pad-delimiter count.
 */
static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
		u32 numDelims)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int ctl17;

	ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);

	/*
	 * Stage ctl17 in a stack variable so the uncached descriptor
	 * word gets a single read-modify-write instead of three accesses.
	 */
	ctl17 = ads->ctl17;
	ctl17 &= ~AR_PadDelim;
	ctl17 |= SM(numDelims, AR_PadDelim);
	ads->ctl17 = ctl17;
}
430
431static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
432{
433 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
434
435 ads->ctl12 |= AR_IsAggr;
436 ads->ctl12 &= ~AR_MoreAggr;
437 ads->ctl17 &= ~AR_PadDelim;
438}
439
440static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
441{
442 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
443
444 ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
445}
446
447static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
448 u32 burstDuration)
449{
450 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
451
452 ads->ctl13 &= ~AR_BurstDur;
453 ads->ctl13 |= SM(burstDuration, AR_BurstDur);
454
455}
456
457static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
458 u32 vmf)
459{
460 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
461
462 if (vmf)
463 ads->ctl11 |= AR_VirtMoreFrag;
464 else
465 ads->ctl11 &= ~AR_VirtMoreFrag;
466}
467
/*
 * Install the AR9003-family MAC callbacks into the hardware ops table.
 * Called once during hardware attach for AR9003-class chips.
 */
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(hw);

	ops->rx_enable = ar9003_hw_rx_enable;
	ops->set_desc_link = ar9003_hw_set_desc_link;
	ops->get_desc_link = ar9003_hw_get_desc_link;
	ops->get_isr = ar9003_hw_get_isr;
	ops->fill_txdesc = ar9003_hw_fill_txdesc;
	ops->proc_txdesc = ar9003_hw_proc_txdesc;
	ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
	ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
	ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
	ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
	ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
	ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
	ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
	ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
}
487
/* Program the RX DMA buffer size register (masked to the valid field). */
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
{
	REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
}
EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
493
494void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
495 enum ath9k_rx_qtype qtype)
496{
497 if (qtype == ATH9K_RX_QUEUE_HP)
498 REG_WRITE(ah, AR_HP_RXDP, rxdp);
499 else
500 REG_WRITE(ah, AR_LP_RXDP, rxdp);
501}
502EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
503
/*
 * Decode an EDMA RX status block (prepended to the frame in @buf_addr)
 * into *rxs.
 *
 * Returns -EINPROGRESS when DMA has not completed or the block belongs
 * to the wrong descriptor stream, -EINVAL on a bad descriptor id, and
 * 0 on success.  A NULL @rxs performs the validity checks only.
 */
int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
		void *buf_addr)
{
	struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
	unsigned int phyerr;

	/* TODO: byte swap on big endian for ar9300_10 */

	if ((rxsp->status11 & AR_RxDone) == 0)
		return -EINPROGRESS;

	/* 0x168c is the Atheros PCI vendor id tag in ds_info. */
	if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
		return -EINVAL;

	if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
		return -EINPROGRESS;

	if (!rxs)
		return 0;

	rxs->rs_status = 0;
	rxs->rs_flags = 0;

	rxs->rs_datalen = rxsp->status2 & AR_DataLen;
	rxs->rs_tstamp = rxsp->status3;

	/* XXX: Keycache */
	rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
	rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
	rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
	rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
	rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
	rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
	rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);

	if (rxsp->status11 & AR_RxKeyIdxValid)
		rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
	else
		rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
	rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;

	rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
	rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
	rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
	rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
	rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;

	/* Per-stream EVM words for rate control. */
	rxs->evm0 = rxsp->status6;
	rxs->evm1 = rxsp->status7;
	rxs->evm2 = rxsp->status8;
	rxs->evm3 = rxsp->status9;
	rxs->evm4 = (rxsp->status10 & 0xffff);

	if (rxsp->status11 & AR_PreDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;

	if (rxsp->status11 & AR_PostDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;

	if (rxsp->status11 & AR_DecryptBusyErr)
		rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	/* Error bits are only meaningful when the frame-OK bit is clear;
	 * the first matching error determines rs_status. */
	if ((rxsp->status11 & AR_RxFrameOK) == 0) {
		if (rxsp->status11 & AR_CRCErr) {
			rxs->rs_status |= ATH9K_RXERR_CRC;
		} else if (rxsp->status11 & AR_PHYErr) {
			rxs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(rxsp->status11, AR_PHYErrCode);
			rxs->rs_phyerr = phyerr;
		} else if (rxsp->status11 & AR_DecryptCRCErr) {
			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
		} else if (rxsp->status11 & AR_MichaelErr) {
			rxs->rs_status |= ATH9K_RXERR_MIC;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
585
/*
 * Reset the TX status ring: rewind the software tail, zero every entry
 * (so stale AR_TxDone bits cannot be seen), and reprogram the ring
 * bounds into the hardware.
 */
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
{
	ah->ts_tail = 0;

	memset((void *) ah->ts_ring, 0,
			ah->ts_size * sizeof(struct ar9003_txs));

	ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
			"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
			ah->ts_paddr_start, ah->ts_paddr_end,
			ah->ts_ring, ah->ts_size);

	REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
	REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
}
601
/*
 * Record the TX status ring's virtual/physical location and entry count
 * in the hw state, then reset and program it into the hardware.
 *
 * @ts_start:       virtual address of the ring memory
 * @ts_paddr_start: DMA (physical) address of the same memory
 * @size:           number of ar9003_txs entries
 */
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
		u32 ts_paddr_start,
		u8 size)
{

	ah->ts_paddr_start = ts_paddr_start;
	ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
	ah->ts_size = size;
	ah->ts_ring = (struct ar9003_txs *) ts_start;

	ath9k_hw_reset_txstatus_ring(ah);
}
EXPORT_SYMBOL(ath9k_hw_setup_statusring);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 000000000000..f17558b14539
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef AR9003_MAC_H
18#define AR9003_MAC_H
19
20#define AR_DescId 0xffff0000
21#define AR_DescId_S 16
22#define AR_CtrlStat 0x00004000
23#define AR_CtrlStat_S 14
24#define AR_TxRxDesc 0x00008000
25#define AR_TxRxDesc_S 15
26#define AR_TxQcuNum 0x00000f00
27#define AR_TxQcuNum_S 8
28
29#define AR_BufLen 0x0fff0000
30#define AR_BufLen_S 16
31
32#define AR_TxDescId 0xffff0000
33#define AR_TxDescId_S 16
34#define AR_TxPtrChkSum 0x0000ffff
35
36#define AR_TxTid 0xf0000000
37#define AR_TxTid_S 28
38
39#define AR_LowRxChain 0x00004000
40
41#define AR_Not_Sounding 0x20000000
42
43#define MAP_ISR_S2_CST 6
44#define MAP_ISR_S2_GTT 6
45#define MAP_ISR_S2_TIM 3
46#define MAP_ISR_S2_CABEND 0
47#define MAP_ISR_S2_DTIMSYNC 7
48#define MAP_ISR_S2_DTIM 7
49#define MAP_ISR_S2_TSFOOR 4
50
51#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
52
/*
 * EDMA RX status block DMA'd by the AR9003 MAC ahead of each frame
 * (12 dwords).  Field layout is decoded in ath9k_hw_process_rxdesc_edma().
 */
struct ar9003_rxs {
	u32 ds_info;
	u32 status1;
	u32 status2;
	u32 status3;
	u32 status4;
	u32 status5;
	u32 status6;
	u32 status7;
	u32 status8;
	u32 status9;
	u32 status10;
	u32 status11;
} __packed;
67
/*
 * Transmit Control Descriptor.  ctl10 carries the pointer checksum over
 * the info/link/data/ctl words (see ar9003_calc_ptr_chksum()); the pad
 * extends the structure to a full 128-byte cache line.
 */
struct ar9003_txc {
	u32 info;   /* descriptor information */
	u32 link;   /* link pointer */
	u32 data0;  /* data pointer to 1st buffer */
	u32 ctl3;   /* DMA control 3 */
	u32 data1;  /* data pointer to 2nd buffer */
	u32 ctl5;   /* DMA control 5 */
	u32 data2;  /* data pointer to 3rd buffer */
	u32 ctl7;   /* DMA control 7 */
	u32 data3;  /* data pointer to 4th buffer */
	u32 ctl9;   /* DMA control 9 */
	u32 ctl10;  /* DMA control 10 */
	u32 ctl11;  /* DMA control 11 */
	u32 ctl12;  /* DMA control 12 */
	u32 ctl13;  /* DMA control 13 */
	u32 ctl14;  /* DMA control 14 */
	u32 ctl15;  /* DMA control 15 */
	u32 ctl16;  /* DMA control 16 */
	u32 ctl17;  /* DMA control 17 */
	u32 ctl18;  /* DMA control 18 */
	u32 ctl19;  /* DMA control 19 */
	u32 ctl20;  /* DMA control 20 */
	u32 ctl21;  /* DMA control 21 */
	u32 ctl22;  /* DMA control 22 */
	u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
} __packed;
95
/*
 * TX status descriptor written by hardware into the status ring
 * (9 dwords).  Field layout is decoded in ar9003_hw_proc_txdesc().
 */
struct ar9003_txs {
	u32 ds_info;
	u32 status1;
	u32 status2;
	u32 status3;
	u32 status4;
	u32 status5;
	u32 status6;
	u32 status7;
	u32 status8;
} __packed;
107
108void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
109void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
110void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
111 enum ath9k_rx_qtype qtype);
112
113int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
114 struct ath_rx_status *rxs,
115 void *buf_addr);
116void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
117void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
118 u32 ts_paddr_start,
119 u8 size);
120#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 000000000000..80431a2f6dc1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_phy.h"
19
20/**
21 * ar9003_hw_set_channel - set channel on single-chip device
22 * @ah: atheros hardware structure
 * @chan: the channel to tune the synthesizer to
24 *
25 * This is the function to change channel on single-chip devices, that is
26 * all devices after ar9280.
27 *
28 * This function takes the channel value in MHz and sets
29 * hardware channel value. Assumes writes have been enabled to analog bus.
30 *
31 * Actual Expression,
32 *
33 * For 2GHz channel,
34 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
35 * (freq_ref = 40MHz)
36 *
37 * For 5GHz channel,
38 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
39 * (freq_ref = 40MHz/(24>>amodeRefSel))
40 *
41 * For 5GHz channels which are 5MHz spaced,
42 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
43 * (freq_ref = 40MHz)
44 */
45static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
46{
47 u16 bMode, fracMode = 0, aModeRefSel = 0;
48 u32 freq, channelSel = 0, reg32 = 0;
49 struct chan_centers centers;
50 int loadSynthChannel;
51
52 ath9k_hw_get_channel_centers(ah, chan, &centers);
53 freq = centers.synth_center;
54
55 if (freq < 4800) { /* 2 GHz, fractional mode */
56 channelSel = CHANSEL_2G(freq);
57 /* Set to 2G mode */
58 bMode = 1;
59 } else {
60 channelSel = CHANSEL_5G(freq);
61 /* Doubler is ON, so, divide channelSel by 2. */
62 channelSel >>= 1;
63 /* Set to 5G mode */
64 bMode = 0;
65 }
66
67 /* Enable fractional mode for all channels */
68 fracMode = 1;
69 aModeRefSel = 0;
70 loadSynthChannel = 0;
71
72 reg32 = (bMode << 29);
73 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
74
75 /* Enable Long shift Select for Synthesizer */
76 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
77 AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
78
79 /* Program Synth. setting */
80 reg32 = (channelSel << 2) | (fracMode << 30) |
81 (aModeRefSel << 28) | (loadSynthChannel << 31);
82 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
83
84 /* Toggle Load Synth channel bit */
85 loadSynthChannel = 1;
86 reg32 = (channelSel << 2) | (fracMode << 30) |
87 (aModeRefSel << 28) | (loadSynthChannel << 31);
88 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
89
90 ah->curchan = chan;
91 ah->curchan_rad_index = -1;
92
93 return 0;
94}
95
96/**
97 * ar9003_hw_spur_mitigate - convert baseband spur frequency
98 * @ah: atheros hardware structure
 * @chan: the channel whose spur settings are being computed
100 *
101 * For single-chip solutions. Converts to baseband spur frequency given the
102 * input channel frequency and compute register settings below.
103 *
104 * Spur mitigation for MRC CCK
105 */
106static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
107 struct ath9k_channel *chan)
108{
109 u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
110 int cur_bb_spur, negative = 0, cck_spur_freq;
111 int i;
112
113 /*
114 * Need to verify range +/- 10 MHz in control channel, otherwise spur
115 * is out-of-band and can be ignored.
116 */
117
118 for (i = 0; i < 4; i++) {
119 negative = 0;
120 cur_bb_spur = spur_freq[i] - chan->channel;
121
122 if (cur_bb_spur < 0) {
123 negative = 1;
124 cur_bb_spur = -cur_bb_spur;
125 }
126 if (cur_bb_spur < 10) {
127 cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
128
129 if (negative == 1)
130 cck_spur_freq = -cck_spur_freq;
131
132 cck_spur_freq = cck_spur_freq & 0xfffff;
133
134 REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
135 AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
136 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
137 AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
138 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
139 AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
140 0x2);
141 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
142 AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
143 0x1);
144 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
145 AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
146 cck_spur_freq);
147
148 return;
149 }
150 }
151
152 REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
153 AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
154 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
155 AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
156 REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
157 AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
158}
159
160/* Clean all spur register fields */
/*
 * Reset every OFDM spur-mitigation register field to its inactive (zero)
 * state.  Called before the per-channel spur values are programmed so
 * that stale settings from a previous channel never persist.
 */
static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
{
	/* Spur filter and self-correlation inputs off */
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
	REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
		      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);

	/* Pilot/channel spur masks off and mask indices cleared */
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
}
203
/*
 * Program the OFDM spur-mitigation registers for a single spur.
 *
 * @freq_offset: spur offset from the synth frequency in MHz
 * @spur_freq_sd / @spur_delta_phase / @spur_subchannel_sd: precomputed
 *	field values from ar9003_hw_spur_ofdm_work()
 */
static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
				int freq_offset,
				int spur_freq_sd,
				int spur_delta_phase,
				int spur_subchannel_sd)
{
	int mask_index = 0;

	/* OFDM Spur mitigation */
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
	REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
		      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);

	/* NF-based spur mitigation only when the PHY is in dynamic mode */
	if (REG_READ_FIELD(ah, AR_PHY_MODE,
			   AR_PHY_MODE_DYNAMIC) == 0x1)
		REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
			      AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);

	/* Convert the MHz offset into mask bins (16/5 bins per MHz) */
	mask_index = (freq_offset << 4) / 5;
	/* Bias negative indices down by one (division truncates toward 0) */
	if (mask_index < 0)
		mask_index = mask_index - 1;

	mask_index = mask_index & 0x7f;

	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
}
264
265static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
266 struct ath9k_channel *chan,
267 int freq_offset)
268{
269 int spur_freq_sd = 0;
270 int spur_subchannel_sd = 0;
271 int spur_delta_phase = 0;
272
273 if (IS_CHAN_HT40(chan)) {
274 if (freq_offset < 0) {
275 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
276 AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
277 spur_subchannel_sd = 1;
278 else
279 spur_subchannel_sd = 0;
280
281 spur_freq_sd = ((freq_offset + 10) << 9) / 11;
282
283 } else {
284 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
285 AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
286 spur_subchannel_sd = 0;
287 else
288 spur_subchannel_sd = 1;
289
290 spur_freq_sd = ((freq_offset - 10) << 9) / 11;
291
292 }
293
294 spur_delta_phase = (freq_offset << 17) / 5;
295
296 } else {
297 spur_subchannel_sd = 0;
298 spur_freq_sd = (freq_offset << 9) /11;
299 spur_delta_phase = (freq_offset << 18) / 5;
300 }
301
302 spur_freq_sd = spur_freq_sd & 0x3ff;
303 spur_delta_phase = spur_delta_phase & 0xfffff;
304
305 ar9003_hw_spur_ofdm(ah,
306 freq_offset,
307 spur_freq_sd,
308 spur_delta_phase,
309 spur_subchannel_sd);
310}
311
312/* Spur mitigation for OFDM */
313static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
314 struct ath9k_channel *chan)
315{
316 int synth_freq;
317 int range = 10;
318 int freq_offset = 0;
319 int mode;
320 u8* spurChansPtr;
321 unsigned int i;
322 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
323
324 if (IS_CHAN_5GHZ(chan)) {
325 spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
326 mode = 0;
327 }
328 else {
329 spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
330 mode = 1;
331 }
332
333 if (spurChansPtr[0] == 0)
334 return; /* No spur in the mode */
335
336 if (IS_CHAN_HT40(chan)) {
337 range = 19;
338 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
339 AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
340 synth_freq = chan->channel - 10;
341 else
342 synth_freq = chan->channel + 10;
343 } else {
344 range = 10;
345 synth_freq = chan->channel;
346 }
347
348 ar9003_hw_spur_ofdm_clear(ah);
349
350 for (i = 0; spurChansPtr[i] && i < 5; i++) {
351 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
352 if (abs(freq_offset) < range) {
353 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
354 break;
355 }
356 }
357}
358
/*
 * Top-level spur mitigation for AR9003: apply both the MRC CCK and the
 * OFDM spur mitigation for the given channel.
 */
static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
				    struct ath9k_channel *chan)
{
	ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
	ar9003_hw_spur_mitigate_ofdm(ah, chan);
}
365
366static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
367 struct ath9k_channel *chan)
368{
369 u32 pll;
370
371 pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
372
373 if (chan && IS_CHAN_HALF_RATE(chan))
374 pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
375 else if (chan && IS_CHAN_QUARTER_RATE(chan))
376 pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
377
378 pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
379
380 return pll;
381}
382
/*
 * Program the baseband and MAC for the channel's 20/40 MHz mode and set
 * the global transmit / carrier-sense timeouts.
 */
static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
				       struct ath9k_channel *chan)
{
	u32 phymode;
	u32 enableDacFifo = 0;

	/* Preserve the current DAC FIFO enable bit */
	enableDacFifo =
		(REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);

	/* Enable 11n HT, 20 MHz */
	phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
		  AR_PHY_GC_SHORT_GI_40 | enableDacFifo;

	/* Configure baseband for dynamic 20/40 operation */
	if (IS_CHAN_HT40(chan)) {
		phymode |= AR_PHY_GC_DYN2040_EN;
		/* Configure control (primary) channel at +-10MHz */
		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
		    (chan->chanmode == CHANNEL_G_HT40PLUS))
			phymode |= AR_PHY_GC_DYN2040_PRI_CH;

	}

	/* make sure we preserve INI settings */
	phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
	/* turn off Green Field detection for STA for now */
	phymode &= ~AR_PHY_GC_GF_DETECT_EN;

	REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);

	/* Configure MAC for 20/40 operation */
	ath9k_hw_set11nmac2040(ah);

	/* global transmit timeout (25 TUs default)*/
	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
	/* carrier sense timeout */
	REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
}
421
422static void ar9003_hw_init_bb(struct ath_hw *ah,
423 struct ath9k_channel *chan)
424{
425 u32 synthDelay;
426
427 /*
428 * Wait for the frequency synth to settle (synth goes on
429 * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
430 * Value is in 100ns increments.
431 */
432 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
433 if (IS_CHAN_B(chan))
434 synthDelay = (4 * synthDelay) / 22;
435 else
436 synthDelay /= 10;
437
438 /* Activate the PHY (includes baseband activate + synthesizer on) */
439 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
440
441 /*
442 * There is an issue if the AP starts the calibration before
443 * the base band timeout completes. This could result in the
444 * rx_clear false triggering. As a workaround we add delay an
445 * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
446 * does not happen.
447 */
448 udelay(synthDelay + BASE_ACTIVATE_DELAY);
449}
450
/*
 * Program the RX and TX chain masks.  The 0x5 mask (chains 0 + 2) also
 * requires the analog alt-chain swap bit; for RX this is done via a
 * deliberate switch fall-through into the common mask programming.
 */
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
	switch (rx) {
	case 0x5:
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
		/* fall through */
	case 0x3:
	case 0x1:
	case 0x2:
	case 0x7:
		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
		break;
	default:
		/* Unsupported mask: leave the chain registers untouched */
		break;
	}

	REG_WRITE(ah, AR_SELFGEN_MASK, tx);
	if (tx == 0x5) {
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
	}
}
474
475/*
476 * Override INI values with chip specific configuration.
477 */
/*
 * Override INI values with chip specific configuration.
 */
static void ar9003_hw_override_ini(struct ath_hw *ah)
{
	u32 val;

	/*
	 * Set the RX_ABORT and RX_DIS and clear it only after
	 * RXE is set for MAC. This prevents frames with
	 * corrupted descriptor status.
	 */
	REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	/*
	 * For AR9280 and above, there is a new feature that allows
	 * Multicast search based on both MAC Address and Key ID. By default,
	 * this feature is enabled. But since the driver is not using this
	 * feature, we switch it off; otherwise multicast search based on
	 * MAC addr only will fail.
	 */
	val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
	REG_WRITE(ah, AR_PCU_MISC_MODE2,
		  val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
}
500
501static void ar9003_hw_prog_ini(struct ath_hw *ah,
502 struct ar5416IniArray *iniArr,
503 int column)
504{
505 unsigned int i, regWrites = 0;
506
507 /* New INI format: Array may be undefined (pre, core, post arrays) */
508 if (!iniArr->ia_array)
509 return;
510
511 /*
512 * New INI format: Pre, core, and post arrays for a given subsystem
513 * may be modal (> 2 columns) or non-modal (2 columns). Determine if
514 * the array is non-modal and force the column to 1.
515 */
516 if (column >= iniArr->ia_columns)
517 column = 1;
518
519 for (i = 0; i < iniArr->ia_rows; i++) {
520 u32 reg = INI_RA(iniArr, i, 0);
521 u32 val = INI_RA(iniArr, i, column);
522
523 REG_WRITE(ah, reg, val);
524
525 /*
526 * Determine if this is a shift register value, and insert the
527 * configured delay if so.
528 */
529 if (reg >= 0x16000 && reg < 0x17000
530 && ah->config.analog_shiftreg)
531 udelay(100);
532
533 DO_DELAY(regWrites);
534 }
535}
536
/*
 * Program all INI arrays (SOC, MAC, BB, radio, gain tables) for the
 * given channel, then apply chip overrides, channel registers, chain
 * masks and TX power.  Returns -EINVAL for an unknown channel mode.
 */
static int ar9003_hw_process_ini(struct ath_hw *ah,
				 struct ath9k_channel *chan)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	unsigned int regWrites = 0, i;
	struct ieee80211_channel *channel = chan->chan;
	u32 modesIndex, freqIndex;

	/* Map the channel mode onto the INI modal/frequency columns */
	/* NOTE(review): freqIndex is assigned but not used below — confirm */
	switch (chan->chanmode) {
	case CHANNEL_A:
	case CHANNEL_A_HT20:
		modesIndex = 1;
		freqIndex = 1;
		break;
	case CHANNEL_A_HT40PLUS:
	case CHANNEL_A_HT40MINUS:
		modesIndex = 2;
		freqIndex = 1;
		break;
	case CHANNEL_G:
	case CHANNEL_G_HT20:
	case CHANNEL_B:
		modesIndex = 4;
		freqIndex = 2;
		break;
	case CHANNEL_G_HT40PLUS:
	case CHANNEL_G_HT40MINUS:
		modesIndex = 3;
		freqIndex = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Program the pre/core/post arrays for each subsystem */
	for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
		ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
	}

	REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
	REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);

	/*
	 * For 5GHz channels requiring Fast Clock, apply
	 * different modal values.
	 */
	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
		REG_WRITE_ARRAY(&ah->iniModesAdditional,
				modesIndex, regWrites);

	ar9003_hw_override_ini(ah);
	ar9003_hw_set_channel_regs(ah, chan);
	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

	/* Set TX power */
	ah->eep_ops->set_txpower(ah, chan,
				 ath9k_regd_get_ctl(regulatory, chan),
				 channel->max_antenna_gain * 2,
				 channel->max_power * 2,
				 min((u32) MAX_RATE_POWER,
				     (u32) regulatory->power_limit));

	return 0;
}
604
605static void ar9003_hw_set_rfmode(struct ath_hw *ah,
606 struct ath9k_channel *chan)
607{
608 u32 rfMode = 0;
609
610 if (chan == NULL)
611 return;
612
613 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
614 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
615
616 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
617 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
618
619 REG_WRITE(ah, AR_PHY_MODE, rfMode);
620}
621
/* Take the baseband out of the active state. */
static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
626
/*
 * Compute and program the OFDM delta-slope coefficients (mantissa and
 * exponent) for the channel, for both normal and short guard interval.
 */
static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
				      struct ath9k_channel *chan)
{
	u32 coef_scaled, ds_coef_exp, ds_coef_man;
	u32 clockMhzScaled = 0x64000000;
	struct chan_centers centers;

	/*
	 * half and quarter rate can divide the scaled clock by 2 or 4
	 * scale for selected channel bandwidth
	 */
	if (IS_CHAN_HALF_RATE(chan))
		clockMhzScaled = clockMhzScaled >> 1;
	else if (IS_CHAN_QUARTER_RATE(chan))
		clockMhzScaled = clockMhzScaled >> 2;

	/*
	 * ALGO -> coef = 1e8/fcarrier*fclock/40;
	 * scaled coef to provide precision for this floating calculation
	 */
	ath9k_hw_get_channel_centers(ah, chan, &centers);
	coef_scaled = clockMhzScaled / centers.synth_center;

	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
				      &ds_coef_exp);

	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
		      AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
		      AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);

	/*
	 * For Short GI,
	 * scaled coeff is 9/10 that of normal coeff
	 */
	coef_scaled = (9 * coef_scaled) / 10;

	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
				      &ds_coef_exp);

	/* for short gi */
	REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
		      AR_PHY_SGI_DSC_MAN, ds_coef_man);
	REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
		      AR_PHY_SGI_DSC_EXP, ds_coef_exp);
}
673
/*
 * Request ownership of the RF bus.  Returns true if the grant bit was
 * observed before AH_WAIT_TIMEOUT expired.
 */
static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
	return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
			     AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
680
681/*
682 * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
683 * Read the phy active delay register. Value is in 100ns increments.
684 */
685static void ar9003_hw_rfbus_done(struct ath_hw *ah)
686{
687 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
688 if (IS_CHAN_B(ah->curchan))
689 synthDelay = (4 * synthDelay) / 22;
690 else
691 synthDelay /= 10;
692
693 udelay(synthDelay + BASE_ACTIVATE_DELAY);
694
695 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
696}
697
698/*
699 * Set the interrupt and GPIO values so the ISR can disable RF
700 * on a switch signal. Assumes GPIO port and interrupt polarity
701 * are set prior to call.
702 */
/*
 * Wire the rfkill GPIO into the baseband's RF-silence logic.  Assumes
 * the GPIO port and interrupt polarity were configured by the caller.
 */
static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
{
	/* Connect rfsilent_bb_l to baseband */
	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
	/* Set input mux for rfsilent_bb_l to GPIO #0 */
	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
		    AR_GPIO_INPUT_MUX2_RFSILENT);

	/*
	 * Configure the desired GPIO port for input and
	 * enable baseband rf silence.
	 */
	ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
}
719
720static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
721{
722 u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
723 if (value)
724 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
725 else
726 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
727 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
728}
729
/*
 * Apply one ANI (Adaptive Noise Immunity) command.  Each command maps a
 * level or flag onto baseband register fields and updates the matching
 * ANI statistics and cached state.  Returns false on an out-of-range
 * level or an unknown command.
 */
static bool ar9003_hw_ani_control(struct ath_hw *ah,
				  enum ath9k_ani_cmd cmd, int param)
{
	struct ar5416AniState *aniState = ah->curani;
	struct ath_common *common = ath9k_hw_common(ah);

	switch (cmd & ah->ani_function) {
	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
		u32 level = param;

		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
			return false;
		}

		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_TOT_DES,
			      ah->totalSizeDesired[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC,
			      AR_PHY_AGC_COARSE_LOW,
			      ah->coarse_low[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC,
			      AR_PHY_AGC_COARSE_HIGH,
			      ah->coarse_high[level]);
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);

		if (level > aniState->noiseImmunityLevel)
			ah->stats.ast_ani_niup++;
		else if (level < aniState->noiseImmunityLevel)
			ah->stats.ast_ani_nidown++;
		aniState->noiseImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
		/* Per-flag threshold tables: index 0 = off, 1 = on */
		const int m1ThreshLow[] = { 127, 50 };
		const int m2ThreshLow[] = { 127, 40 };
		const int m1Thresh[] = { 127, 0x4d };
		const int m2Thresh[] = { 127, 0x40 };
		const int m2CountThr[] = { 31, 16 };
		const int m2CountThrLow[] = { 63, 48 };
		u32 on = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
			      m2CountThrLow[on]);

		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);

		if (on)
			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
		else
			REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

		/* The cached state stores the inverted sense of "on" */
		if (!on != aniState->ofdmWeakSigDetectOff) {
			if (on)
				ah->stats.ast_ani_ofdmon++;
			else
				ah->stats.ast_ani_ofdmoff++;
			aniState->ofdmWeakSigDetectOff = !on;
		}
		break;
	}
	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
		const int weakSigThrCck[] = { 8, 6 };
		u32 high = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
			      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
			      weakSigThrCck[high]);
		if (high != aniState->cckWeakSigThreshold) {
			if (high)
				ah->stats.ast_ani_cckhigh++;
			else
				ah->stats.ast_ani_ccklow++;
			aniState->cckWeakSigThreshold = high;
		}
		break;
	}
	case ATH9K_ANI_FIRSTEP_LEVEL:{
		const int firstep[] = { 0, 4, 8 };
		u32 level = param;

		if (level >= ARRAY_SIZE(firstep)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(firstep));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRSTEP,
			      firstep[level]);
		if (level > aniState->firstepLevel)
			ah->stats.ast_ani_stepup++;
		else if (level < aniState->firstepLevel)
			ah->stats.ast_ani_stepdown++;
		aniState->firstepLevel = level;
		break;
	}
	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
		const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
		u32 level = param;

		if (level >= ARRAY_SIZE(cycpwrThr1)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(cycpwrThr1));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
			      AR_PHY_TIMING5_CYCPWR_THR1,
			      cycpwrThr1[level]);
		if (level > aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurup++;
		else if (level < aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurdown++;
		aniState->spurImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_PRESENT:
		break;
	default:
		ath_print(common, ATH_DBG_ANI,
			  "invalid cmd %u\n", cmd);
		return false;
	}

	/* Dump the resulting ANI state for debugging */
	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
	ath_print(common, ATH_DBG_ANI,
		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
		  "ofdmWeakSigDetectOff=%d\n",
		  aniState->noiseImmunityLevel,
		  aniState->spurImmunityLevel,
		  !aniState->ofdmWeakSigDetectOff);
	ath_print(common, ATH_DBG_ANI,
		  "cckWeakSigThreshold=%d, "
		  "firstepLevel=%d, listenTime=%d\n",
		  aniState->cckWeakSigThreshold,
		  aniState->firstepLevel,
		  aniState->listenTime);
	ath_print(common, ATH_DBG_ANI,
		  "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
		  aniState->cycleCount,
		  aniState->ofdmPhyErrCount,
		  aniState->cckPhyErrCount);

	return true;
}
904
905static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
906{
907 struct ath_common *common = ath9k_hw_common(ah);
908
909 if (*nf > ah->nf_2g_max) {
910 ath_print(common, ATH_DBG_CALIBRATE,
911 "2 GHz NF (%d) > MAX (%d), "
912 "correcting to MAX",
913 *nf, ah->nf_2g_max);
914 *nf = ah->nf_2g_max;
915 } else if (*nf < ah->nf_2g_min) {
916 ath_print(common, ATH_DBG_CALIBRATE,
917 "2 GHz NF (%d) < MIN (%d), "
918 "correcting to MIN",
919 *nf, ah->nf_2g_min);
920 *nf = ah->nf_2g_min;
921 }
922}
923
924static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
925{
926 struct ath_common *common = ath9k_hw_common(ah);
927
928 if (*nf > ah->nf_5g_max) {
929 ath_print(common, ATH_DBG_CALIBRATE,
930 "5 GHz NF (%d) > MAX (%d), "
931 "correcting to MAX",
932 *nf, ah->nf_5g_max);
933 *nf = ah->nf_5g_max;
934 } else if (*nf < ah->nf_5g_min) {
935 ath_print(common, ATH_DBG_CALIBRATE,
936 "5 GHz NF (%d) < MIN (%d), "
937 "correcting to MIN",
938 *nf, ah->nf_5g_min);
939 *nf = ah->nf_5g_min;
940 }
941}
942
943static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
944{
945 if (IS_CHAN_2GHZ(ah->curchan))
946 ar9003_hw_nf_sanitize_2g(ah, nf);
947 else
948 ar9003_hw_nf_sanitize_5g(ah, nf);
949}
950
/*
 * Read the latest noise-floor measurement for each of the six readings
 * (control + extension channel, chains 0-2) into nfarray, sanitizing
 * each value against the band limits.
 */
static void ar9003_hw_do_getnf(struct ath_hw *ah,
			       int16_t nfarray[NUM_NF_READINGS])
{
	struct ath_common *common = ath9k_hw_common(ah);
	int16_t nf;

	nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
	/* minCCApwr is a 9-bit two's-complement field; sign-extend it */
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ctl] [chain 0] is %d\n", nf);
	nfarray[0] = nf;

	nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ctl] [chain 1] is %d\n", nf);
	nfarray[1] = nf;

	nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ctl] [chain 2] is %d\n", nf);
	nfarray[2] = nf;

	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ext] [chain 0] is %d\n", nf);
	nfarray[3] = nf;

	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ext] [chain 1] is %d\n", nf);
	nfarray[4] = nf;

	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	ar9003_hw_nf_sanitize(ah, &nf);
	ath_print(common, ATH_DBG_CALIBRATE,
		  "NF calibrated [ext] [chain 2] is %d\n", nf);
	nfarray[5] = nf;
}
1005
/* Install the AR9300 per-band noise-floor sanity limits. */
void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
	ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
	ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
	ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
	ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
}
1013
1014/*
1015 * Find out which of the RX chains are enabled
1016 */
1017static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
1018{
1019 u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
1020 /*
1021 * The bits [2:0] indicate the rx chain mask and are to be
1022 * interpreted as follows:
1023 * 00x => Only chain 0 is enabled
1024 * 01x => Chain 1 and 0 enabled
1025 * 1xx => Chain 2,1 and 0 enabled
1026 */
1027 return chain & 0x7;
1028}
1029
/*
 * Load the software-filtered noise-floor history into the baseband's
 * internal minCCApwr variables, wait for the load to finish, then
 * restore the max-CCA-power cap so the next hardware NF calibration is
 * not limited by the value just loaded.
 */
static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	unsigned i, j;
	int32_t val;
	/* CCA registers in the same order as the nfCalHist readings */
	const u32 ar9300_cca_regs[6] = {
		AR_PHY_CCA_0,
		AR_PHY_CCA_1,
		AR_PHY_CCA_2,
		AR_PHY_EXT_CCA,
		AR_PHY_EXT_CCA_1,
		AR_PHY_EXT_CCA_2,
	};
	u8 chainmask, rx_chain_status;
	struct ath_common *common = ath9k_hw_common(ah);

	/*
	 * NOTE(review): rx_chain_status is read but never used below;
	 * chainmask is hard-coded to all six readings — confirm whether
	 * it should be derived from the rx chainmask instead.
	 */
	rx_chain_status = ar9003_hw_get_rx_chainmask(ah);

	chainmask = 0x3F;
	h = ah->nfCalHist;

	/* Write the filtered NF (9-bit field) into each CCA register */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar9300_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
			REG_WRITE(ah, ar9300_cca_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 1000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval. We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 1000) {
		ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
			  "to load: AR_PHY_AGC_CONTROL=0x%x\n",
			  REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar9300_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ar9300_cca_regs[i], val);
		}
	}
}
1113
/*
 * Hook the AR9003 PHY implementations into the private ops table used
 * by the ath9k hardware core.
 */
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->rf_set_freq = ar9003_hw_set_channel;
	priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
	priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
	priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
	priv_ops->init_bb = ar9003_hw_init_bb;
	priv_ops->process_ini = ar9003_hw_process_ini;
	priv_ops->set_rfmode = ar9003_hw_set_rfmode;
	priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
	priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
	priv_ops->rfbus_req = ar9003_hw_rfbus_req;
	priv_ops->rfbus_done = ar9003_hw_rfbus_done;
	priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
	priv_ops->set_diversity = ar9003_hw_set_diversity;
	priv_ops->ani_control = ar9003_hw_ani_control;
	priv_ops->do_getnf = ar9003_hw_do_getnf;
	priv_ops->loadnf = ar9003_hw_loadnf;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 000000000000..f08cc8bda005
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
1/*
2 * Copyright (c) 2002-2010 Atheros Communications, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef AR9003_PHY_H
18#define AR9003_PHY_H
19
20/*
21 * Channel Register Map
22 */
23#define AR_CHAN_BASE 0x9800
24
25#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
26#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
27#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
28#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
29#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
30#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
31#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
32#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
33#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
34#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
35
36#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
37#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
38
39#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
40#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
41
42#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
43#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
44
45#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
46#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
47
48#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
49#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
50
51#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
52#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
53#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
54#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
55#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
56#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
57#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
58#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
59
60#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
61#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
62
63#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
64#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
65
66#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
67
68#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
69#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
70#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
71
72#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
73#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
74#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
75#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
76#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
77#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
78
79#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
80#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
81#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
82#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
83#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
84#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
85
86/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
87#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
88#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
89#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
90#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
91#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
92#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
93
94#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
95#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
96#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
97#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
98
99/*
100 * Channel Field Definitions
101 */
102#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
103#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
104#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
105#define AR_PHY_TIMING3_DSC_MAN_S 17
106#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
107#define AR_PHY_TIMING3_DSC_EXP_S 13
108#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
109#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
110#define AR_PHY_TIMING4_DO_CAL 0x10000
111
112#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
113#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
114#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
115#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
116
117#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
118#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
119#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
120#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
121
122#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
123#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
124#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
125#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
126#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
127#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
128#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
129#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
130#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
131#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
132#define AR_PHY_SFCORR_M2COUNT_THR_S 0
133#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
134#define AR_PHY_SFCORR_M1_THRESH_S 17
135#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
136#define AR_PHY_SFCORR_M2_THRESH_S 24
137#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
138#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
139#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
140#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
141#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
142#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
143#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
144#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
145#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
146#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
147#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
148#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
149#define AR_PHY_EXT_CCA_THRESH62_S 16
150#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
151#define AR_PHY_EXT_MINCCA_PWR_S 16
152#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
153#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
154#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
155#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
156#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
157#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
158#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
159#define AR_PHY_TIMING5_RSSI_THR1A_S 16
160#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
161#define AR_PHY_RADAR_0_ENA 0x00000001
162#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
163#define AR_PHY_RADAR_0_INBAND 0x0000003e
164#define AR_PHY_RADAR_0_INBAND_S 1
165#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
166#define AR_PHY_RADAR_0_PRSSI_S 6
167#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
168#define AR_PHY_RADAR_0_HEIGHT_S 12
169#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
170#define AR_PHY_RADAR_0_RRSSI_S 18
171#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
172#define AR_PHY_RADAR_0_FIRPWR_S 24
173#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
174#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
175#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
176#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
177#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
178#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
179#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
180#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
181#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
182#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
183#define AR_PHY_RADAR_1_MAXLEN_S 0
184#define AR_PHY_RADAR_EXT_ENA 0x00004000
185#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
186#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
187#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
188#define AR_PHY_RADAR_LB_DC_CAP_S 23
189#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
190#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
191#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
192#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
193#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
194#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
195#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
196#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
197#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
198#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
199#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
200#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
201#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
202#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
203#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
204#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
205#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
206#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
207#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
208
209/*
210 * MRC Register Map
211 */
212#define AR_MRC_BASE 0x9c00
213
214#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
215#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
216#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
217#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
218#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
219#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
220#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
221#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
222#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
223
224#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
225#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
228
229#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
230#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
231#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
232#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
233
234/*
235 * MRC Field Definitions
236 */
237#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
238#define AR_PHY_SGI_DSC_MAN_S 4
239#define AR_PHY_SGI_DSC_EXP 0x0000000F
240#define AR_PHY_SGI_DSC_EXP_S 0
241/*
242 * BBB Register Map
243 */
244#define AR_BBB_BASE 0x9d00
245
246/*
247 * AGC Register Map
248 */
249#define AR_AGC_BASE 0x9e00
250
251#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
252#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
253#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
254#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
255#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
256#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
257#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
258#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
259#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
260#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
261#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
262#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
263#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
264#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
265#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
266#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
267#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
268#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
269#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
270#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
271#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
272#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
273#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
274#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
275
276#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
277#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
278#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
279#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
280#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
281#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
282#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
283#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
284#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
285
286#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
287
288#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
289#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
290#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
291#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
292#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
293#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
294
295/*
296 * AGC Field Definitions
297 */
298#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
299#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
300#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
301#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
302#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
303#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
304#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
305#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
306#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
307#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
308#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
309#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
310#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
311#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
312#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
313#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
314#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
315#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
316#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
317#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
318#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
319#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
320#define AR_PHY_SETTLING_SWITCH 0x00003F80
321#define AR_PHY_SETTLING_SWITCH_S 7
322#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
323#define AR_PHY_DESIRED_SZ_ADC_S 0
324#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
325#define AR_PHY_DESIRED_SZ_PGA_S 8
326#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
327#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
328#define AR_PHY_MINCCA_PWR 0x1FF00000
329#define AR_PHY_MINCCA_PWR_S 20
330#define AR_PHY_CCA_THRESH62 0x0007F000
331#define AR_PHY_CCA_THRESH62_S 12
332#define AR9280_PHY_MINCCA_PWR 0x1FF00000
333#define AR9280_PHY_MINCCA_PWR_S 20
334#define AR9280_PHY_CCA_THRESH62 0x000FF000
335#define AR9280_PHY_CCA_THRESH62_S 12
336#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
337#define AR_PHY_EXT_CCA0_THRESH62_S 0
338#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
339#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
340#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
341#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
342#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
343
344#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
345#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
346#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
347#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
348
349#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
350#define AR_PHY_AGC_COARSE_LOW 0x00007F80
351#define AR_PHY_AGC_COARSE_LOW_S 7
352#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
353#define AR_PHY_AGC_COARSE_HIGH_S 15
354#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
355#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
356#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
357#define AR_PHY_FIND_SIG_FIRSTEP_S 12
358#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
359#define AR_PHY_FIND_SIG_FIRPWR_S 18
360#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
361#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
362#define AR_PHY_FIND_SIG_RELPWR_S 6
363#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
364#define AR_PHY_FIND_SIG_RELSTEP 0x1f
365#define AR_PHY_FIND_SIG_RELSTEP_S 0
366#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
367#define AR_PHY_RESTART_DIV_GC 0x001C0000
368#define AR_PHY_RESTART_DIV_GC_S 18
369#define AR_PHY_RESTART_ENA 0x01
370#define AR_PHY_DC_RESTART_DIS 0x40000000
371
372#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
373#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
374#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
375#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
376
377#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
378#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
379
380/*
381 * SM Register Map
382 */
383#define AR_SM_BASE 0xa200
384
385#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
386#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
387#define AR_PHY_MODE (AR_SM_BASE + 0x8)
388#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
389#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
390#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
391#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
392#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
393#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
394#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
395#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
396#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
397#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
398#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
399#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
400#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
401
402#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
403#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
404#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
405#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
406#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
407#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
408#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
409#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
410#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
411#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
412#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
413#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
414#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
415#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
416#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
417#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
418#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
419#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
420#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
421
422#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
423#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
424#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
425#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
426
427#define AR_PHY_TEST (AR_SM_BASE + 0x160)
428
429#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
430#define AR_PHY_TEST_BBB_OBS_SEL_S 19
431
432#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
433#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
434
435#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
436#define AR_PHY_TEST_CHAIN_SEL_S 30
437
438#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
439#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
440#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
441#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
442#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
443#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
444#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
445#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
446#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
447#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
448#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
449
450
451#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
452
453#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
454#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
455#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
456#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
457#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
458#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
459#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
460#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
461
462#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
463#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
464#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
465#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
466
467#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
468#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
469
470#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
471#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
472#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
473#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
474#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
475#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
476
477#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
478
479#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
480
481#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
482#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
483#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
484#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
485
486#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
487#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
488#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
489#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
490#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
491#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
492#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
493#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
494
495#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
496#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
497#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
498#define AR_PHY_65NM_CH0_SYNTH7 0x16098
499#define AR_PHY_65NM_CH0_BIAS1 0x160c0
500#define AR_PHY_65NM_CH0_BIAS2 0x160c4
501#define AR_PHY_65NM_CH0_BIAS4 0x160cc
502#define AR_PHY_65NM_CH0_RXTX4 0x1610c
503#define AR_PHY_65NM_CH0_THERM 0x16290
504
505#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
506#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
507#define AR_PHY_65NM_CH0_THERM_START 0x20000000
508#define AR_PHY_65NM_CH0_THERM_START_S 29
509#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
510#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
511
512#define AR_PHY_65NM_CH0_RXTX1 0x16100
513#define AR_PHY_65NM_CH0_RXTX2 0x16104
514#define AR_PHY_65NM_CH1_RXTX1 0x16500
515#define AR_PHY_65NM_CH1_RXTX2 0x16504
516#define AR_PHY_65NM_CH2_RXTX1 0x16900
517#define AR_PHY_65NM_CH2_RXTX2 0x16904
518
519#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
520#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
521#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
522#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
523#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
524#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
525#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
526#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
527#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
528#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
529#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
530#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
531#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
532#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
533
534/*
535 * SM Field Definitions
536 */
537#define AR_PHY_CL_CAL_ENABLE 0x00000002
538#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
539#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
540#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
541
542#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
543
544#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
545#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
546
547#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
548#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
549#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
550#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
551#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
552#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
553#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
554#define AR_PHY_GC_DYN2040_PRI_CH_S 4
555#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
556#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
557#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
558#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
559#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
560#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
561#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
562#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
563
564#define AR_PHY_CALMODE_IQ 0x00000000
565#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
566#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
567#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
568#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
569#define AR_PHY_MODE_OFDM 0x00000000
570#define AR_PHY_MODE_CCK 0x00000001
571#define AR_PHY_MODE_DYNAMIC 0x00000004
572#define AR_PHY_MODE_DYNAMIC_S 2
573#define AR_PHY_MODE_HALF 0x00000020
574#define AR_PHY_MODE_QUARTER 0x00000040
575#define AR_PHY_MAC_CLK_MODE 0x00000080
576#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
577#define AR_PHY_MODE_SVD_HALF 0x00000200
578#define AR_PHY_ACTIVE_EN 0x00000001
579#define AR_PHY_ACTIVE_DIS 0x00000000
580#define AR_PHY_FORCE_XPA_CFG 0x000000001
581#define AR_PHY_FORCE_XPA_CFG_S 0
582#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
583#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
584#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
585#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
586#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
587#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
588#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
589#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
590#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
591#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
592#define AR_PHY_TX_END_DATA_START 0x000000FF
593#define AR_PHY_TX_END_DATA_START_S 0
594#define AR_PHY_TX_END_PA_ON 0x0000FF00
595#define AR_PHY_TX_END_PA_ON_S 8
596#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
597#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
598#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
599#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
600#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
601#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
602#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
603#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
604#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
605#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
606#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
607#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
608#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
609#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
610#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
611#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
612#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
613#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
614#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
615#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
616#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
617#define AR_PHY_TXGAIN_FORCE 0x00000001
618#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
619#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
620#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
621#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
622#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
623#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
624#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
625#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
626#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
627#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
628
629#define AR_PHY_POWER_TX_RATE1 0x9934
630#define AR_PHY_POWER_TX_RATE2 0x9938
631#define AR_PHY_POWER_TX_RATE_MAX 0x993c
632#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
633#define PHY_AGC_CLR 0x10000000
634#define RFSILENT_BB 0x00002000
635#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
636#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
637#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
638#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
639#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
640#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
641#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
642#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
643#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
644#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
645#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
646#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
647#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
648#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
649#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
650#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
651#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
652#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
653#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
654#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
655#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
656#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
657#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
658
659#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
660#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
661#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
662
663#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
664#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
665#define AR_PHY_TPC_19_ALPHA_THERM 0xff
666#define AR_PHY_TPC_19_ALPHA_THERM_S 0
667
668#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
669#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
670
671#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
672#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
673
674/*
675 * Channel 1 Register Map
676 */
677#define AR_CHAN1_BASE 0xa800
678
679#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
680#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
681#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
682
683#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
684#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
685#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
686
687/*
688 * Channel 1 Field Definitions
689 */
690#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
691#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
692
693/*
694 * AGC 1 Register Map
695 */
696#define AR_AGC1_BASE 0xae00
697
698#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
699#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
700#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
701#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
702#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
703#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
704#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
705
706/*
707 * AGC 1 Field Definitions
708 */
709#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
710#define AR_PHY_CH1_MINCCA_PWR_S 20
711
712/*
713 * SM 1 Register Map
714 */
715#define AR_SM1_BASE 0xb200
716
717#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
718#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
719#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
720#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
721#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
722#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
723#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
724#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
725#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
726#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
727#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
728#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
729
730/*
731 * Channel 2 Register Map
732 */
733#define AR_CHAN2_BASE 0xb800
734
735#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
736#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
737#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
738
739#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
740#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
741#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
742
743/*
744 * Channel 2 Field Definitions
745 */
746#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
747#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
748/*
749 * AGC 2 Register Map
750 */
751#define AR_AGC2_BASE 0xbe00
752
753#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
754#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
755#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
756#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
757#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
758
759/*
760 * AGC 2 Field Definitions
761 */
762#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
763#define AR_PHY_CH2_MINCCA_PWR_S 20
764
765/*
766 * SM 2 Register Map
767 */
768#define AR_SM2_BASE 0xc200
769
770#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
771#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
772#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
773#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
774#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
775#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
776#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
777#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
778#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
779#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
780#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
781#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
782
783#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
784
785/*
786 * AGC 3 Register Map
787 */
788#define AR_AGC3_BASE 0xce00
789
790#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
791
792/*
793 * Misc helper defines
794 */
795#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
796
797#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
798#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
799#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
800#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
801
802#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
803#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
804#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
805
806#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
807#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
808#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
809#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
810#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
811#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
812#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
813#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
814
815#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
816#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
817#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
818#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
819
820#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
821#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
822#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
823
824#define AR_PHY_BB_WD_STATUS 0x00000007
825#define AR_PHY_BB_WD_STATUS_S 0
826#define AR_PHY_BB_WD_DET_HANG 0x00000008
827#define AR_PHY_BB_WD_DET_HANG_S 3
828#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
829#define AR_PHY_BB_WD_RADAR_SM_S 4
830#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
831#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
832#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
833#define AR_PHY_BB_WD_RX_CCK_SM_S 12
834#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
835#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
836#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
837#define AR_PHY_BB_WD_TX_CCK_SM_S 20
838#define AR_PHY_BB_WD_AGC_SM 0x0F000000
839#define AR_PHY_BB_WD_AGC_SM_S 24
840#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
841#define AR_PHY_BB_WD_SRCH_SM_S 28
842
843#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
844
845void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
846
847#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index bdcd257ca7a4..fbb7dec6ddeb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
114#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY) 114#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
115#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) 115#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
116 116
117#define ATH_TXSTATUS_RING_SIZE 64
118
117struct ath_descdma { 119struct ath_descdma {
118 struct ath_desc *dd_desc; 120 void *dd_desc;
119 dma_addr_t dd_desc_paddr; 121 dma_addr_t dd_desc_paddr;
120 u32 dd_desc_len; 122 u32 dd_desc_len;
121 struct ath_buf *dd_bufptr; 123 struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
123 125
124int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, 126int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
125 struct list_head *head, const char *name, 127 struct list_head *head, const char *name,
126 int nbuf, int ndesc); 128 int nbuf, int ndesc, bool is_tx);
127void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, 129void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
128 struct list_head *head); 130 struct list_head *head);
129 131
@@ -188,6 +190,7 @@ enum ATH_AGGR_STATUS {
188 ATH_AGGR_LIMITED, 190 ATH_AGGR_LIMITED,
189}; 191};
190 192
193#define ATH_TXFIFO_DEPTH 8
191struct ath_txq { 194struct ath_txq {
192 u32 axq_qnum; 195 u32 axq_qnum;
193 u32 *axq_link; 196 u32 *axq_link;
@@ -197,6 +200,10 @@ struct ath_txq {
197 bool stopped; 200 bool stopped;
198 bool axq_tx_inprogress; 201 bool axq_tx_inprogress;
199 struct list_head axq_acq; 202 struct list_head axq_acq;
203 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
204 struct list_head txq_fifo_pending;
205 u8 txq_headidx;
206 u8 txq_tailidx;
200}; 207};
201 208
202#define AGGR_CLEANUP BIT(1) 209#define AGGR_CLEANUP BIT(1)
@@ -223,6 +230,12 @@ struct ath_tx {
223 struct ath_descdma txdma; 230 struct ath_descdma txdma;
224}; 231};
225 232
233struct ath_rx_edma {
234 struct sk_buff_head rx_fifo;
235 struct sk_buff_head rx_buffers;
236 u32 rx_fifo_hwsize;
237};
238
226struct ath_rx { 239struct ath_rx {
227 u8 defant; 240 u8 defant;
228 u8 rxotherant; 241 u8 rxotherant;
@@ -232,6 +245,8 @@ struct ath_rx {
232 spinlock_t rxbuflock; 245 spinlock_t rxbuflock;
233 struct list_head rxbuf; 246 struct list_head rxbuf;
234 struct ath_descdma rxdma; 247 struct ath_descdma rxdma;
248 struct ath_buf *rx_bufptr;
249 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
235}; 250};
236 251
237int ath_startrecv(struct ath_softc *sc); 252int ath_startrecv(struct ath_softc *sc);
@@ -240,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
240u32 ath_calcrxfilter(struct ath_softc *sc); 255u32 ath_calcrxfilter(struct ath_softc *sc);
241int ath_rx_init(struct ath_softc *sc, int nbufs); 256int ath_rx_init(struct ath_softc *sc, int nbufs);
242void ath_rx_cleanup(struct ath_softc *sc); 257void ath_rx_cleanup(struct ath_softc *sc);
243int ath_rx_tasklet(struct ath_softc *sc, int flush); 258int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
244struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 259struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
245void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 260void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
246int ath_tx_setup(struct ath_softc *sc, int haltype); 261int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -258,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
258int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 273int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
259 struct ath_tx_control *txctl); 274 struct ath_tx_control *txctl);
260void ath_tx_tasklet(struct ath_softc *sc); 275void ath_tx_tasklet(struct ath_softc *sc);
276void ath_tx_edma_tasklet(struct ath_softc *sc);
261void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb); 277void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
262bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); 278bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
263void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 279void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -507,6 +523,8 @@ struct ath_softc {
507 struct ath_beacon_config cur_beacon_conf; 523 struct ath_beacon_config cur_beacon_conf;
508 struct delayed_work tx_complete_work; 524 struct delayed_work tx_complete_work;
509 struct ath_btcoex btcoex; 525 struct ath_btcoex btcoex;
526
527 struct ath_descdma txsdma;
510}; 528};
511 529
512struct ath_wiphy { 530struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 22375a754718..c8a4558f79ba 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -93,8 +93,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
93 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 93 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
94 } 94 }
95 95
96 ds->ds_data = bf->bf_buf_addr;
97
98 sband = &sc->sbands[common->hw->conf.channel->band]; 96 sband = &sc->sbands[common->hw->conf.channel->band];
99 rate = sband->bitrates[rateidx].hw_value; 97 rate = sband->bitrates[rateidx].hw_value;
100 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 98 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
@@ -109,7 +107,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
109 107
110 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 108 /* NB: beacon's BufLen must be a multiple of 4 bytes */
111 ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4), 109 ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
112 true, true, ds); 110 true, true, ds, bf->bf_buf_addr,
111 sc->beacon.beaconq);
113 112
114 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 113 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
115 series[0].Tries = 1; 114 series[0].Tries = 1;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 064f5b51dfcd..6982577043b8 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,10 +15,12 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h"
19
20/* Common calibration code */
18 21
19/* We can tune this as we go by monitoring really low values */ 22/* We can tune this as we go by monitoring really low values */
20#define ATH9K_NF_TOO_LOW -60 23#define ATH9K_NF_TOO_LOW -60
21#define AR9285_CLCAL_REDO_THRESH 1
22 24
23/* AR5416 may return very high value (like -31 dBm), in those cases the nf 25/* AR5416 may return very high value (like -31 dBm), in those cases the nf
24 * is incorrect and we should use the static NF value. Later we can try to 26 * is incorrect and we should use the static NF value. Later we can try to
@@ -87,98 +89,9 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
87 return; 89 return;
88} 90}
89 91
90static void ath9k_hw_do_getnf(struct ath_hw *ah, 92static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
91 int16_t nfarray[NUM_NF_READINGS]) 93 enum ieee80211_band band,
92{ 94 int16_t *nft)
93 struct ath_common *common = ath9k_hw_common(ah);
94 int16_t nf;
95
96 if (AR_SREV_9280_10_OR_LATER(ah))
97 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
98 else
99 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
100
101 if (nf & 0x100)
102 nf = 0 - ((nf ^ 0x1ff) + 1);
103 ath_print(common, ATH_DBG_CALIBRATE,
104 "NF calibrated [ctl] [chain 0] is %d\n", nf);
105
106 if (AR_SREV_9271(ah) && (nf >= -114))
107 nf = -116;
108
109 nfarray[0] = nf;
110
111 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
112 if (AR_SREV_9280_10_OR_LATER(ah))
113 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
114 AR9280_PHY_CH1_MINCCA_PWR);
115 else
116 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
117 AR_PHY_CH1_MINCCA_PWR);
118
119 if (nf & 0x100)
120 nf = 0 - ((nf ^ 0x1ff) + 1);
121 ath_print(common, ATH_DBG_CALIBRATE,
122 "NF calibrated [ctl] [chain 1] is %d\n", nf);
123 nfarray[1] = nf;
124
125 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
126 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
127 AR_PHY_CH2_MINCCA_PWR);
128 if (nf & 0x100)
129 nf = 0 - ((nf ^ 0x1ff) + 1);
130 ath_print(common, ATH_DBG_CALIBRATE,
131 "NF calibrated [ctl] [chain 2] is %d\n", nf);
132 nfarray[2] = nf;
133 }
134 }
135
136 if (AR_SREV_9280_10_OR_LATER(ah))
137 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
138 AR9280_PHY_EXT_MINCCA_PWR);
139 else
140 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
141 AR_PHY_EXT_MINCCA_PWR);
142
143 if (nf & 0x100)
144 nf = 0 - ((nf ^ 0x1ff) + 1);
145 ath_print(common, ATH_DBG_CALIBRATE,
146 "NF calibrated [ext] [chain 0] is %d\n", nf);
147
148 if (AR_SREV_9271(ah) && (nf >= -114))
149 nf = -116;
150
151 nfarray[3] = nf;
152
153 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
154 if (AR_SREV_9280_10_OR_LATER(ah))
155 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
156 AR9280_PHY_CH1_EXT_MINCCA_PWR);
157 else
158 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
159 AR_PHY_CH1_EXT_MINCCA_PWR);
160
161 if (nf & 0x100)
162 nf = 0 - ((nf ^ 0x1ff) + 1);
163 ath_print(common, ATH_DBG_CALIBRATE,
164 "NF calibrated [ext] [chain 1] is %d\n", nf);
165 nfarray[4] = nf;
166
167 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
168 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
169 AR_PHY_CH2_EXT_MINCCA_PWR);
170 if (nf & 0x100)
171 nf = 0 - ((nf ^ 0x1ff) + 1);
172 ath_print(common, ATH_DBG_CALIBRATE,
173 "NF calibrated [ext] [chain 2] is %d\n", nf);
174 nfarray[5] = nf;
175 }
176 }
177}
178
179static bool getNoiseFloorThresh(struct ath_hw *ah,
180 enum ieee80211_band band,
181 int16_t *nft)
182{ 95{
183 switch (band) { 96 switch (band) {
184 case IEEE80211_BAND_5GHZ: 97 case IEEE80211_BAND_5GHZ:
@@ -195,44 +108,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
195 return true; 108 return true;
196} 109}
197 110
198static void ath9k_hw_setup_calibration(struct ath_hw *ah, 111void ath9k_hw_reset_calibration(struct ath_hw *ah,
199 struct ath9k_cal_list *currCal) 112 struct ath9k_cal_list *currCal)
200{
201 struct ath_common *common = ath9k_hw_common(ah);
202
203 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
204 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
205 currCal->calData->calCountMax);
206
207 switch (currCal->calData->calType) {
208 case IQ_MISMATCH_CAL:
209 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
210 ath_print(common, ATH_DBG_CALIBRATE,
211 "starting IQ Mismatch Calibration\n");
212 break;
213 case ADC_GAIN_CAL:
214 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
215 ath_print(common, ATH_DBG_CALIBRATE,
216 "starting ADC Gain Calibration\n");
217 break;
218 case ADC_DC_CAL:
219 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
220 ath_print(common, ATH_DBG_CALIBRATE,
221 "starting ADC DC Calibration\n");
222 break;
223 case ADC_DC_INIT_CAL:
224 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
225 ath_print(common, ATH_DBG_CALIBRATE,
226 "starting Init ADC DC Calibration\n");
227 break;
228 }
229
230 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
231 AR_PHY_TIMING_CTRL4_DO_CAL);
232}
233
234static void ath9k_hw_reset_calibration(struct ath_hw *ah,
235 struct ath9k_cal_list *currCal)
236{ 113{
237 int i; 114 int i;
238 115
@@ -250,324 +127,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
250 ah->cal_samples = 0; 127 ah->cal_samples = 0;
251} 128}
252 129
253static bool ath9k_hw_per_calibration(struct ath_hw *ah,
254 struct ath9k_channel *ichan,
255 u8 rxchainmask,
256 struct ath9k_cal_list *currCal)
257{
258 bool iscaldone = false;
259
260 if (currCal->calState == CAL_RUNNING) {
261 if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
262 AR_PHY_TIMING_CTRL4_DO_CAL)) {
263
264 currCal->calData->calCollect(ah);
265 ah->cal_samples++;
266
267 if (ah->cal_samples >= currCal->calData->calNumSamples) {
268 int i, numChains = 0;
269 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
270 if (rxchainmask & (1 << i))
271 numChains++;
272 }
273
274 currCal->calData->calPostProc(ah, numChains);
275 ichan->CalValid |= currCal->calData->calType;
276 currCal->calState = CAL_DONE;
277 iscaldone = true;
278 } else {
279 ath9k_hw_setup_calibration(ah, currCal);
280 }
281 }
282 } else if (!(ichan->CalValid & currCal->calData->calType)) {
283 ath9k_hw_reset_calibration(ah, currCal);
284 }
285
286 return iscaldone;
287}
288
289/* Assumes you are talking about the currently configured channel */
290static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
291 enum ath9k_cal_types calType)
292{
293 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
294
295 switch (calType & ah->supp_cals) {
296 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
297 return true;
298 case ADC_GAIN_CAL:
299 case ADC_DC_CAL:
300 if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
301 conf_is_ht20(conf)))
302 return true;
303 break;
304 }
305 return false;
306}
307
308static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
309{
310 int i;
311
312 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
313 ah->totalPowerMeasI[i] +=
314 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
315 ah->totalPowerMeasQ[i] +=
316 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
317 ah->totalIqCorrMeas[i] +=
318 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
319 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
320 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
321 ah->cal_samples, i, ah->totalPowerMeasI[i],
322 ah->totalPowerMeasQ[i],
323 ah->totalIqCorrMeas[i]);
324 }
325}
326
327static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
328{
329 int i;
330
331 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
332 ah->totalAdcIOddPhase[i] +=
333 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
334 ah->totalAdcIEvenPhase[i] +=
335 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
336 ah->totalAdcQOddPhase[i] +=
337 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
338 ah->totalAdcQEvenPhase[i] +=
339 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
340
341 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
342 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
343 "oddq=0x%08x; evenq=0x%08x;\n",
344 ah->cal_samples, i,
345 ah->totalAdcIOddPhase[i],
346 ah->totalAdcIEvenPhase[i],
347 ah->totalAdcQOddPhase[i],
348 ah->totalAdcQEvenPhase[i]);
349 }
350}
351
352static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
353{
354 int i;
355
356 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
357 ah->totalAdcDcOffsetIOddPhase[i] +=
358 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
359 ah->totalAdcDcOffsetIEvenPhase[i] +=
360 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
361 ah->totalAdcDcOffsetQOddPhase[i] +=
362 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
363 ah->totalAdcDcOffsetQEvenPhase[i] +=
364 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
365
366 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
367 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
368 "oddq=0x%08x; evenq=0x%08x;\n",
369 ah->cal_samples, i,
370 ah->totalAdcDcOffsetIOddPhase[i],
371 ah->totalAdcDcOffsetIEvenPhase[i],
372 ah->totalAdcDcOffsetQOddPhase[i],
373 ah->totalAdcDcOffsetQEvenPhase[i]);
374 }
375}
376
377static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
378{
379 struct ath_common *common = ath9k_hw_common(ah);
380 u32 powerMeasQ, powerMeasI, iqCorrMeas;
381 u32 qCoffDenom, iCoffDenom;
382 int32_t qCoff, iCoff;
383 int iqCorrNeg, i;
384
385 for (i = 0; i < numChains; i++) {
386 powerMeasI = ah->totalPowerMeasI[i];
387 powerMeasQ = ah->totalPowerMeasQ[i];
388 iqCorrMeas = ah->totalIqCorrMeas[i];
389
390 ath_print(common, ATH_DBG_CALIBRATE,
391 "Starting IQ Cal and Correction for Chain %d\n",
392 i);
393
394 ath_print(common, ATH_DBG_CALIBRATE,
395 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
396 i, ah->totalIqCorrMeas[i]);
397
398 iqCorrNeg = 0;
399
400 if (iqCorrMeas > 0x80000000) {
401 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
402 iqCorrNeg = 1;
403 }
404
405 ath_print(common, ATH_DBG_CALIBRATE,
406 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
407 ath_print(common, ATH_DBG_CALIBRATE,
408 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
409 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
410 iqCorrNeg);
411
412 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
413 qCoffDenom = powerMeasQ / 64;
414
415 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
416 (qCoffDenom != 0)) {
417 iCoff = iqCorrMeas / iCoffDenom;
418 qCoff = powerMeasI / qCoffDenom - 64;
419 ath_print(common, ATH_DBG_CALIBRATE,
420 "Chn %d iCoff = 0x%08x\n", i, iCoff);
421 ath_print(common, ATH_DBG_CALIBRATE,
422 "Chn %d qCoff = 0x%08x\n", i, qCoff);
423
424 iCoff = iCoff & 0x3f;
425 ath_print(common, ATH_DBG_CALIBRATE,
426 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
427 if (iqCorrNeg == 0x0)
428 iCoff = 0x40 - iCoff;
429
430 if (qCoff > 15)
431 qCoff = 15;
432 else if (qCoff <= -16)
433 qCoff = 16;
434
435 ath_print(common, ATH_DBG_CALIBRATE,
436 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
437 i, iCoff, qCoff);
438
439 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
440 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
441 iCoff);
442 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
443 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
444 qCoff);
445 ath_print(common, ATH_DBG_CALIBRATE,
446 "IQ Cal and Correction done for Chain %d\n",
447 i);
448 }
449 }
450
451 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
452 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
453}
454
455static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
456{
457 struct ath_common *common = ath9k_hw_common(ah);
458 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
459 u32 qGainMismatch, iGainMismatch, val, i;
460
461 for (i = 0; i < numChains; i++) {
462 iOddMeasOffset = ah->totalAdcIOddPhase[i];
463 iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
464 qOddMeasOffset = ah->totalAdcQOddPhase[i];
465 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
466
467 ath_print(common, ATH_DBG_CALIBRATE,
468 "Starting ADC Gain Cal for Chain %d\n", i);
469
470 ath_print(common, ATH_DBG_CALIBRATE,
471 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
472 iOddMeasOffset);
473 ath_print(common, ATH_DBG_CALIBRATE,
474 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
475 iEvenMeasOffset);
476 ath_print(common, ATH_DBG_CALIBRATE,
477 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
478 qOddMeasOffset);
479 ath_print(common, ATH_DBG_CALIBRATE,
480 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
481 qEvenMeasOffset);
482
483 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
484 iGainMismatch =
485 ((iEvenMeasOffset * 32) /
486 iOddMeasOffset) & 0x3f;
487 qGainMismatch =
488 ((qOddMeasOffset * 32) /
489 qEvenMeasOffset) & 0x3f;
490
491 ath_print(common, ATH_DBG_CALIBRATE,
492 "Chn %d gain_mismatch_i = 0x%08x\n", i,
493 iGainMismatch);
494 ath_print(common, ATH_DBG_CALIBRATE,
495 "Chn %d gain_mismatch_q = 0x%08x\n", i,
496 qGainMismatch);
497
498 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
499 val &= 0xfffff000;
500 val |= (qGainMismatch) | (iGainMismatch << 6);
501 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
502
503 ath_print(common, ATH_DBG_CALIBRATE,
504 "ADC Gain Cal done for Chain %d\n", i);
505 }
506 }
507
508 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
509 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
510 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
511}
512
513static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
514{
515 struct ath_common *common = ath9k_hw_common(ah);
516 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
517 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
518 const struct ath9k_percal_data *calData =
519 ah->cal_list_curr->calData;
520 u32 numSamples =
521 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
522
523 for (i = 0; i < numChains; i++) {
524 iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
525 iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
526 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
527 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
528
529 ath_print(common, ATH_DBG_CALIBRATE,
530 "Starting ADC DC Offset Cal for Chain %d\n", i);
531
532 ath_print(common, ATH_DBG_CALIBRATE,
533 "Chn %d pwr_meas_odd_i = %d\n", i,
534 iOddMeasOffset);
535 ath_print(common, ATH_DBG_CALIBRATE,
536 "Chn %d pwr_meas_even_i = %d\n", i,
537 iEvenMeasOffset);
538 ath_print(common, ATH_DBG_CALIBRATE,
539 "Chn %d pwr_meas_odd_q = %d\n", i,
540 qOddMeasOffset);
541 ath_print(common, ATH_DBG_CALIBRATE,
542 "Chn %d pwr_meas_even_q = %d\n", i,
543 qEvenMeasOffset);
544
545 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
546 numSamples) & 0x1ff;
547 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
548 numSamples) & 0x1ff;
549
550 ath_print(common, ATH_DBG_CALIBRATE,
551 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
552 iDcMismatch);
553 ath_print(common, ATH_DBG_CALIBRATE,
554 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
555 qDcMismatch);
556
557 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
558 val &= 0xc0000fff;
559 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
560 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
561
562 ath_print(common, ATH_DBG_CALIBRATE,
563 "ADC DC Offset Cal done for Chain %d\n", i);
564 }
565
566 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
567 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
568 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
569}
570
571/* This is done for the currently configured channel */ 130/* This is done for the currently configured channel */
572bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 131bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
573{ 132{
@@ -614,72 +173,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
614 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 173 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
615} 174}
616 175
617void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
618{
619 struct ath9k_nfcal_hist *h;
620 int i, j;
621 int32_t val;
622 const u32 ar5416_cca_regs[6] = {
623 AR_PHY_CCA,
624 AR_PHY_CH1_CCA,
625 AR_PHY_CH2_CCA,
626 AR_PHY_EXT_CCA,
627 AR_PHY_CH1_EXT_CCA,
628 AR_PHY_CH2_EXT_CCA
629 };
630 u8 chainmask, rx_chain_status;
631
632 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
633 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
634 chainmask = 0x9;
635 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
636 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
637 chainmask = 0x1B;
638 else
639 chainmask = 0x09;
640 } else {
641 if (rx_chain_status & 0x4)
642 chainmask = 0x3F;
643 else if (rx_chain_status & 0x2)
644 chainmask = 0x1B;
645 else
646 chainmask = 0x09;
647 }
648
649 h = ah->nfCalHist;
650
651 for (i = 0; i < NUM_NF_READINGS; i++) {
652 if (chainmask & (1 << i)) {
653 val = REG_READ(ah, ar5416_cca_regs[i]);
654 val &= 0xFFFFFE00;
655 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
656 REG_WRITE(ah, ar5416_cca_regs[i], val);
657 }
658 }
659
660 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
661 AR_PHY_AGC_CONTROL_ENABLE_NF);
662 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
663 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
664 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
665
666 for (j = 0; j < 5; j++) {
667 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
668 AR_PHY_AGC_CONTROL_NF) == 0)
669 break;
670 udelay(50);
671 }
672
673 for (i = 0; i < NUM_NF_READINGS; i++) {
674 if (chainmask & (1 << i)) {
675 val = REG_READ(ah, ar5416_cca_regs[i]);
676 val &= 0xFFFFFE00;
677 val |= (((u32) (-50) << 1) & 0x1ff);
678 REG_WRITE(ah, ar5416_cca_regs[i], val);
679 }
680 }
681}
682
683int16_t ath9k_hw_getnf(struct ath_hw *ah, 176int16_t ath9k_hw_getnf(struct ath_hw *ah,
684 struct ath9k_channel *chan) 177 struct ath9k_channel *chan)
685{ 178{
@@ -699,7 +192,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
699 } else { 192 } else {
700 ath9k_hw_do_getnf(ah, nfarray); 193 ath9k_hw_do_getnf(ah, nfarray);
701 nf = nfarray[0]; 194 nf = nfarray[0];
702 if (getNoiseFloorThresh(ah, c->band, &nfThresh) 195 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
703 && nf > nfThresh) { 196 && nf > nfThresh) {
704 ath_print(common, ATH_DBG_CALIBRATE, 197 ath_print(common, ATH_DBG_CALIBRATE,
705 "noise floor failed detected; " 198 "noise floor failed detected; "
@@ -757,567 +250,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
757 return nf; 250 return nf;
758} 251}
759EXPORT_SYMBOL(ath9k_hw_getchan_noise); 252EXPORT_SYMBOL(ath9k_hw_getchan_noise);
760
761static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
762{
763 u32 rddata;
764 int32_t delta, currPDADC, slope;
765
766 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
767 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
768
769 if (ah->initPDADC == 0 || currPDADC == 0) {
770 /*
771 * Zero value indicates that no frames have been transmitted yet,
772 * can't do temperature compensation until frames are transmitted.
773 */
774 return;
775 } else {
776 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
777
778 if (slope == 0) { /* to avoid divide by zero case */
779 delta = 0;
780 } else {
781 delta = ((currPDADC - ah->initPDADC)*4) / slope;
782 }
783 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
784 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
785 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
786 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
787 }
788}
789
790static void ath9k_olc_temp_compensation(struct ath_hw *ah)
791{
792 u32 rddata, i;
793 int delta, currPDADC, regval;
794
795 if (OLC_FOR_AR9287_10_LATER) {
796 ath9k_olc_temp_compensation_9287(ah);
797 } else {
798 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
799 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
800
801 if (ah->initPDADC == 0 || currPDADC == 0) {
802 return;
803 } else {
804 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
805 delta = (currPDADC - ah->initPDADC + 4) / 8;
806 else
807 delta = (currPDADC - ah->initPDADC + 5) / 10;
808
809 if (delta != ah->PDADCdelta) {
810 ah->PDADCdelta = delta;
811 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
812 regval = ah->originalGain[i] - delta;
813 if (regval < 0)
814 regval = 0;
815
816 REG_RMW_FIELD(ah,
817 AR_PHY_TX_GAIN_TBL1 + i * 4,
818 AR_PHY_TX_GAIN, regval);
819 }
820 }
821 }
822 }
823}
824
825static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
826{
827 u32 regVal;
828 unsigned int i;
829 u32 regList [][2] = {
830 { 0x786c, 0 },
831 { 0x7854, 0 },
832 { 0x7820, 0 },
833 { 0x7824, 0 },
834 { 0x7868, 0 },
835 { 0x783c, 0 },
836 { 0x7838, 0 } ,
837 { 0x7828, 0 } ,
838 };
839
840 for (i = 0; i < ARRAY_SIZE(regList); i++)
841 regList[i][1] = REG_READ(ah, regList[i][0]);
842
843 regVal = REG_READ(ah, 0x7834);
844 regVal &= (~(0x1));
845 REG_WRITE(ah, 0x7834, regVal);
846 regVal = REG_READ(ah, 0x9808);
847 regVal |= (0x1 << 27);
848 REG_WRITE(ah, 0x9808, regVal);
849
850 /* 786c,b23,1, pwddac=1 */
851 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
852 /* 7854, b5,1, pdrxtxbb=1 */
853 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
854 /* 7854, b7,1, pdv2i=1 */
855 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
856 /* 7854, b8,1, pddacinterface=1 */
857 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
858 /* 7824,b12,0, offcal=0 */
859 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
860 /* 7838, b1,0, pwddb=0 */
861 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
862 /* 7820,b11,0, enpacal=0 */
863 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
864 /* 7820,b25,1, pdpadrv1=0 */
865 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
866 /* 7820,b24,0, pdpadrv2=0 */
867 REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
868 /* 7820,b23,0, pdpaout=0 */
869 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
870 /* 783c,b14-16,7, padrvgn2tab_0=7 */
871 REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
872 /*
873 * 7838,b29-31,0, padrvgn1tab_0=0
874 * does not matter since we turn it off
875 */
876 REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
877
878 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
879
880 /* Set:
881 * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
882 * txon=1,paon=1,oscon=1,synthon_force=1
883 */
884 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
885 udelay(30);
886 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
887
888 /* find off_6_1; */
889 for (i = 6; i > 0; i--) {
890 regVal = REG_READ(ah, 0x7834);
891 regVal |= (1 << (20 + i));
892 REG_WRITE(ah, 0x7834, regVal);
893 udelay(1);
894 //regVal = REG_READ(ah, 0x7834);
895 regVal &= (~(0x1 << (20 + i)));
896 regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
897 << (20 + i));
898 REG_WRITE(ah, 0x7834, regVal);
899 }
900
901 regVal = (regVal >>20) & 0x7f;
902
903 /* Update PA cal info */
904 if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
905 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
906 ah->pacal_info.max_skipcount =
907 2 * ah->pacal_info.max_skipcount;
908 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
909 } else {
910 ah->pacal_info.max_skipcount = 1;
911 ah->pacal_info.skipcount = 0;
912 ah->pacal_info.prev_offset = regVal;
913 }
914
915 regVal = REG_READ(ah, 0x7834);
916 regVal |= 0x1;
917 REG_WRITE(ah, 0x7834, regVal);
918 regVal = REG_READ(ah, 0x9808);
919 regVal &= (~(0x1 << 27));
920 REG_WRITE(ah, 0x9808, regVal);
921
922 for (i = 0; i < ARRAY_SIZE(regList); i++)
923 REG_WRITE(ah, regList[i][0], regList[i][1]);
924}
925
926static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
927{
928 struct ath_common *common = ath9k_hw_common(ah);
929 u32 regVal;
930 int i, offset, offs_6_1, offs_0;
931 u32 ccomp_org, reg_field;
932 u32 regList[][2] = {
933 { 0x786c, 0 },
934 { 0x7854, 0 },
935 { 0x7820, 0 },
936 { 0x7824, 0 },
937 { 0x7868, 0 },
938 { 0x783c, 0 },
939 { 0x7838, 0 },
940 };
941
942 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
943
944 /* PA CAL is not needed for high power solution */
945 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
946 AR5416_EEP_TXGAIN_HIGH_POWER)
947 return;
948
949 if (AR_SREV_9285_11(ah)) {
950 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
951 udelay(10);
952 }
953
954 for (i = 0; i < ARRAY_SIZE(regList); i++)
955 regList[i][1] = REG_READ(ah, regList[i][0]);
956
957 regVal = REG_READ(ah, 0x7834);
958 regVal &= (~(0x1));
959 REG_WRITE(ah, 0x7834, regVal);
960 regVal = REG_READ(ah, 0x9808);
961 regVal |= (0x1 << 27);
962 REG_WRITE(ah, 0x9808, regVal);
963
964 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
965 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
966 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
967 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
968 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
969 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
970 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
971 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
972 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
973 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
974 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
975 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
976 ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
977 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
978
979 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
980 udelay(30);
981 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
982 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
983
984 for (i = 6; i > 0; i--) {
985 regVal = REG_READ(ah, 0x7834);
986 regVal |= (1 << (19 + i));
987 REG_WRITE(ah, 0x7834, regVal);
988 udelay(1);
989 regVal = REG_READ(ah, 0x7834);
990 regVal &= (~(0x1 << (19 + i)));
991 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
992 regVal |= (reg_field << (19 + i));
993 REG_WRITE(ah, 0x7834, regVal);
994 }
995
996 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
997 udelay(1);
998 reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
999 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
1000 offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
1001 offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
1002
1003 offset = (offs_6_1<<1) | offs_0;
1004 offset = offset - 0;
1005 offs_6_1 = offset>>1;
1006 offs_0 = offset & 1;
1007
1008 if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
1009 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
1010 ah->pacal_info.max_skipcount =
1011 2 * ah->pacal_info.max_skipcount;
1012 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
1013 } else {
1014 ah->pacal_info.max_skipcount = 1;
1015 ah->pacal_info.skipcount = 0;
1016 ah->pacal_info.prev_offset = offset;
1017 }
1018
1019 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
1020 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
1021
1022 regVal = REG_READ(ah, 0x7834);
1023 regVal |= 0x1;
1024 REG_WRITE(ah, 0x7834, regVal);
1025 regVal = REG_READ(ah, 0x9808);
1026 regVal &= (~(0x1 << 27));
1027 REG_WRITE(ah, 0x9808, regVal);
1028
1029 for (i = 0; i < ARRAY_SIZE(regList); i++)
1030 REG_WRITE(ah, regList[i][0], regList[i][1]);
1031
1032 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
1033
1034 if (AR_SREV_9285_11(ah))
1035 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
1036
1037}
1038
1039bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1040 u8 rxchainmask, bool longcal)
1041{
1042 bool iscaldone = true;
1043 struct ath9k_cal_list *currCal = ah->cal_list_curr;
1044
1045 if (currCal &&
1046 (currCal->calState == CAL_RUNNING ||
1047 currCal->calState == CAL_WAITING)) {
1048 iscaldone = ath9k_hw_per_calibration(ah, chan,
1049 rxchainmask, currCal);
1050 if (iscaldone) {
1051 ah->cal_list_curr = currCal = currCal->calNext;
1052
1053 if (currCal->calState == CAL_WAITING) {
1054 iscaldone = false;
1055 ath9k_hw_reset_calibration(ah, currCal);
1056 }
1057 }
1058 }
1059
1060 /* Do NF cal only at longer intervals */
1061 if (longcal) {
1062 /* Do periodic PAOffset Cal */
1063 if (AR_SREV_9271(ah)) {
1064 if (!ah->pacal_info.skipcount)
1065 ath9k_hw_9271_pa_cal(ah, false);
1066 else
1067 ah->pacal_info.skipcount--;
1068 } else if (AR_SREV_9285_11_OR_LATER(ah)) {
1069 if (!ah->pacal_info.skipcount)
1070 ath9k_hw_9285_pa_cal(ah, false);
1071 else
1072 ah->pacal_info.skipcount--;
1073 }
1074
1075 if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
1076 ath9k_olc_temp_compensation(ah);
1077
1078 /* Get the value from the previous NF cal and update history buffer */
1079 ath9k_hw_getnf(ah, chan);
1080
1081 /*
1082 * Load the NF from history buffer of the current channel.
1083 * NF is slow time-variant, so it is OK to use a historical value.
1084 */
1085 ath9k_hw_loadnf(ah, ah->curchan);
1086
1087 ath9k_hw_start_nfcal(ah);
1088 }
1089
1090 return iscaldone;
1091}
1092EXPORT_SYMBOL(ath9k_hw_calibrate);
1093
1094/* Carrier leakage Calibration fix */
1095static bool ar9285_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1096{
1097 struct ath_common *common = ath9k_hw_common(ah);
1098
1099 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1100 if (IS_CHAN_HT20(chan)) {
1101 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
1102 REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
1103 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1104 AR_PHY_AGC_CONTROL_FLTR_CAL);
1105 REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
1106 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1107 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
1108 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
1109 ath_print(common, ATH_DBG_CALIBRATE, "offset "
1110 "calibration failed to complete in "
1111 "1ms; noisy ??\n");
1112 return false;
1113 }
1114 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
1115 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
1116 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1117 }
1118 REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
1119 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
1120 REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
1121 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1122 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1123 0, AH_WAIT_TIMEOUT)) {
1124 ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
1125 "failed to complete in 1ms; noisy ??\n");
1126 return false;
1127 }
1128
1129 REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
1130 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1131 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
1132
1133 return true;
1134}
1135
1136static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1137{
1138 int i;
1139 u_int32_t txgain_max;
1140 u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
1141 u_int32_t reg_clc_I0, reg_clc_Q0;
1142 u_int32_t i0_num = 0;
1143 u_int32_t q0_num = 0;
1144 u_int32_t total_num = 0;
1145 u_int32_t reg_rf2g5_org;
1146 bool retv = true;
1147
1148 if (!(ar9285_cl_cal(ah, chan)))
1149 return false;
1150
1151 txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
1152 AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
1153
1154 for (i = 0; i < (txgain_max+1); i++) {
1155 clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
1156 AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
1157 if (!(gain_mask & (1 << clc_gain))) {
1158 gain_mask |= (1 << clc_gain);
1159 clc_num++;
1160 }
1161 }
1162
1163 for (i = 0; i < clc_num; i++) {
1164 reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
1165 & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
1166 reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
1167 & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
1168 if (reg_clc_I0 == 0)
1169 i0_num++;
1170
1171 if (reg_clc_Q0 == 0)
1172 q0_num++;
1173 }
1174 total_num = i0_num + q0_num;
1175 if (total_num > AR9285_CLCAL_REDO_THRESH) {
1176 reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
1177 if (AR_SREV_9285E_20(ah)) {
1178 REG_WRITE(ah, AR9285_RF2G5,
1179 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
1180 AR9285_RF2G5_IC50TX_XE_SET);
1181 } else {
1182 REG_WRITE(ah, AR9285_RF2G5,
1183 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
1184 AR9285_RF2G5_IC50TX_SET);
1185 }
1186 retv = ar9285_cl_cal(ah, chan);
1187 REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
1188 }
1189 return retv;
1190}
1191
1192bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1193{
1194 struct ath_common *common = ath9k_hw_common(ah);
1195
1196 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
1197 if (!ar9285_clc(ah, chan))
1198 return false;
1199 } else {
1200 if (AR_SREV_9280_10_OR_LATER(ah)) {
1201 if (!AR_SREV_9287_10_OR_LATER(ah))
1202 REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
1203 AR_PHY_ADC_CTL_OFF_PWDADC);
1204 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1205 AR_PHY_AGC_CONTROL_FLTR_CAL);
1206 }
1207
1208 /* Calibrate the AGC */
1209 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
1210 REG_READ(ah, AR_PHY_AGC_CONTROL) |
1211 AR_PHY_AGC_CONTROL_CAL);
1212
1213 /* Poll for offset calibration complete */
1214 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1215 0, AH_WAIT_TIMEOUT)) {
1216 ath_print(common, ATH_DBG_CALIBRATE,
1217 "offset calibration failed to "
1218 "complete in 1ms; noisy environment?\n");
1219 return false;
1220 }
1221
1222 if (AR_SREV_9280_10_OR_LATER(ah)) {
1223 if (!AR_SREV_9287_10_OR_LATER(ah))
1224 REG_SET_BIT(ah, AR_PHY_ADC_CTL,
1225 AR_PHY_ADC_CTL_OFF_PWDADC);
1226 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1227 AR_PHY_AGC_CONTROL_FLTR_CAL);
1228 }
1229 }
1230
1231 /* Do PA Calibration */
1232 if (AR_SREV_9271(ah))
1233 ath9k_hw_9271_pa_cal(ah, true);
1234 else if (AR_SREV_9285_11_OR_LATER(ah))
1235 ath9k_hw_9285_pa_cal(ah, true);
1236
1237 /* Do NF Calibration after DC offset and other calibrations */
1238 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
1239 REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
1240
1241 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
1242
1243 /* Enable IQ, ADC Gain and ADC DC offset CALs */
1244 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
1245 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
1246 INIT_CAL(&ah->adcgain_caldata);
1247 INSERT_CAL(ah, &ah->adcgain_caldata);
1248 ath_print(common, ATH_DBG_CALIBRATE,
1249 "enabling ADC Gain Calibration.\n");
1250 }
1251 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
1252 INIT_CAL(&ah->adcdc_caldata);
1253 INSERT_CAL(ah, &ah->adcdc_caldata);
1254 ath_print(common, ATH_DBG_CALIBRATE,
1255 "enabling ADC DC Calibration.\n");
1256 }
1257 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
1258 INIT_CAL(&ah->iq_caldata);
1259 INSERT_CAL(ah, &ah->iq_caldata);
1260 ath_print(common, ATH_DBG_CALIBRATE,
1261 "enabling IQ Calibration.\n");
1262 }
1263
1264 ah->cal_list_curr = ah->cal_list;
1265
1266 if (ah->cal_list_curr)
1267 ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
1268 }
1269
1270 chan->CalValid = 0;
1271
1272 return true;
1273}
1274
1275const struct ath9k_percal_data iq_cal_multi_sample = {
1276 IQ_MISMATCH_CAL,
1277 MAX_CAL_SAMPLES,
1278 PER_MIN_LOG_COUNT,
1279 ath9k_hw_iqcal_collect,
1280 ath9k_hw_iqcalibrate
1281};
1282const struct ath9k_percal_data iq_cal_single_sample = {
1283 IQ_MISMATCH_CAL,
1284 MIN_CAL_SAMPLES,
1285 PER_MAX_LOG_COUNT,
1286 ath9k_hw_iqcal_collect,
1287 ath9k_hw_iqcalibrate
1288};
1289const struct ath9k_percal_data adc_gain_cal_multi_sample = {
1290 ADC_GAIN_CAL,
1291 MAX_CAL_SAMPLES,
1292 PER_MIN_LOG_COUNT,
1293 ath9k_hw_adc_gaincal_collect,
1294 ath9k_hw_adc_gaincal_calibrate
1295};
1296const struct ath9k_percal_data adc_gain_cal_single_sample = {
1297 ADC_GAIN_CAL,
1298 MIN_CAL_SAMPLES,
1299 PER_MAX_LOG_COUNT,
1300 ath9k_hw_adc_gaincal_collect,
1301 ath9k_hw_adc_gaincal_calibrate
1302};
1303const struct ath9k_percal_data adc_dc_cal_multi_sample = {
1304 ADC_DC_CAL,
1305 MAX_CAL_SAMPLES,
1306 PER_MIN_LOG_COUNT,
1307 ath9k_hw_adc_dccal_collect,
1308 ath9k_hw_adc_dccal_calibrate
1309};
1310const struct ath9k_percal_data adc_dc_cal_single_sample = {
1311 ADC_DC_CAL,
1312 MIN_CAL_SAMPLES,
1313 PER_MAX_LOG_COUNT,
1314 ath9k_hw_adc_dccal_collect,
1315 ath9k_hw_adc_dccal_calibrate
1316};
1317const struct ath9k_percal_data adc_init_dc_cal = {
1318 ADC_DC_INIT_CAL,
1319 MIN_CAL_SAMPLES,
1320 INIT_LOG_COUNT,
1321 ath9k_hw_adc_dccal_collect,
1322 ath9k_hw_adc_dccal_calibrate
1323};
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e97485..24538bdb9126 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
19 19
20#include "hw.h" 20#include "hw.h"
21 21
22extern const struct ath9k_percal_data iq_cal_multi_sample;
23extern const struct ath9k_percal_data iq_cal_single_sample;
24extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
25extern const struct ath9k_percal_data adc_gain_cal_single_sample;
26extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
27extern const struct ath9k_percal_data adc_dc_cal_single_sample;
28extern const struct ath9k_percal_data adc_init_dc_cal;
29
30#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85 22#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
31#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112 23#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
32#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118 24#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
76 ADC_DC_INIT_CAL = 0x1, 68 ADC_DC_INIT_CAL = 0x1,
77 ADC_GAIN_CAL = 0x2, 69 ADC_GAIN_CAL = 0x2,
78 ADC_DC_CAL = 0x4, 70 ADC_DC_CAL = 0x4,
79 IQ_MISMATCH_CAL = 0x8 71 IQ_MISMATCH_CAL = 0x8,
72 TEMP_COMP_CAL = 0x10,
80}; 73};
81 74
82enum ath9k_cal_state { 75enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
122 115
123bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 116bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
124void ath9k_hw_start_nfcal(struct ath_hw *ah); 117void ath9k_hw_start_nfcal(struct ath_hw *ah);
125void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
126int16_t ath9k_hw_getnf(struct ath_hw *ah, 118int16_t ath9k_hw_getnf(struct ath_hw *ah,
127 struct ath9k_channel *chan); 119 struct ath9k_channel *chan);
128void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); 120void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
129s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 121s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
130bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, 122void ath9k_hw_reset_calibration(struct ath_hw *ah,
131 u8 rxchainmask, bool longcal); 123 struct ath9k_cal_list *currCal);
132bool ath9k_hw_init_cal(struct ath_hw *ah, 124
133 struct ath9k_channel *chan);
134 125
135#endif /* CALIB_H */ 126#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 09effdedc8c0..b4424a623cf5 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -212,7 +212,6 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
212 rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp); 212 rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
213 rx_status->band = hw->conf.channel->band; 213 rx_status->band = hw->conf.channel->band;
214 rx_status->freq = hw->conf.channel->center_freq; 214 rx_status->freq = hw->conf.channel->center_freq;
215 rx_status->noise = common->ani.noise_floor;
216 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 215 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
217 rx_status->antenna = rx_stats->rs_antenna; 216 rx_status->antenna = rx_stats->rs_antenna;
218 rx_status->flag |= RX_FLAG_TSFT; 217 rx_status->flag |= RX_FLAG_TSFT;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 72a835d9e97f..e08f7e5a26e0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,6 +20,7 @@
20#include "../debug.h" 20#include "../debug.h"
21 21
22#include "hw.h" 22#include "hw.h"
23#include "hw-ops.h"
23 24
24/* Common header for Atheros 802.11n base driver cores */ 25/* Common header for Atheros 802.11n base driver cores */
25 26
@@ -76,11 +77,12 @@ struct ath_buf {
76 an aggregate) */ 77 an aggregate) */
77 struct ath_buf *bf_next; /* next subframe in the aggregate */ 78 struct ath_buf *bf_next; /* next subframe in the aggregate */
78 struct sk_buff *bf_mpdu; /* enclosing frame structure */ 79 struct sk_buff *bf_mpdu; /* enclosing frame structure */
79 struct ath_desc *bf_desc; /* virtual addr of desc */ 80 void *bf_desc; /* virtual addr of desc */
80 dma_addr_t bf_daddr; /* physical addr of desc */ 81 dma_addr_t bf_daddr; /* physical addr of desc */
81 dma_addr_t bf_buf_addr; /* physical addr of data buffer */ 82 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
82 bool bf_stale; 83 bool bf_stale;
83 bool bf_isnullfunc; 84 bool bf_isnullfunc;
85 bool bf_tx_aborted;
84 u16 bf_flags; 86 u16 bf_flags;
85 struct ath_buf_state bf_state; 87 struct ath_buf_state bf_state;
86 dma_addr_t bf_dmacontext; 88 dma_addr_t bf_dmacontext;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 9a8e419398f9..64e30cd45d05 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -180,8 +180,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
180{ 180{
181 if (status) 181 if (status)
182 sc->debug.stats.istats.total++; 182 sc->debug.stats.istats.total++;
183 if (status & ATH9K_INT_RX) 183 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
184 sc->debug.stats.istats.rxok++; 184 if (status & ATH9K_INT_RXLP)
185 sc->debug.stats.istats.rxlp++;
186 if (status & ATH9K_INT_RXHP)
187 sc->debug.stats.istats.rxhp++;
188 } else {
189 if (status & ATH9K_INT_RX)
190 sc->debug.stats.istats.rxok++;
191 }
185 if (status & ATH9K_INT_RXEOL) 192 if (status & ATH9K_INT_RXEOL)
186 sc->debug.stats.istats.rxeol++; 193 sc->debug.stats.istats.rxeol++;
187 if (status & ATH9K_INT_RXORN) 194 if (status & ATH9K_INT_RXORN)
@@ -223,8 +230,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
223 char buf[512]; 230 char buf[512];
224 unsigned int len = 0; 231 unsigned int len = 0;
225 232
226 len += snprintf(buf + len, sizeof(buf) - len, 233 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
227 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); 234 len += snprintf(buf + len, sizeof(buf) - len,
235 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
236 len += snprintf(buf + len, sizeof(buf) - len,
237 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
238 } else {
239 len += snprintf(buf + len, sizeof(buf) - len,
240 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
241 }
228 len += snprintf(buf + len, sizeof(buf) - len, 242 len += snprintf(buf + len, sizeof(buf) - len,
229 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); 243 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
230 len += snprintf(buf + len, sizeof(buf) - len, 244 len += snprintf(buf + len, sizeof(buf) - len,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index b2af9de755e6..c545960e7ec5 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
35 * struct ath_interrupt_stats - Contains statistics about interrupts 35 * struct ath_interrupt_stats - Contains statistics about interrupts
36 * @total: Total no. of interrupts generated so far 36 * @total: Total no. of interrupts generated so far
37 * @rxok: RX with no errors 37 * @rxok: RX with no errors
38 * @rxlp: RX with low priority RX
39 * @rxhp: RX with high priority, uapsd only
38 * @rxeol: RX with no more RXDESC available 40 * @rxeol: RX with no more RXDESC available
39 * @rxorn: RX FIFO overrun 41 * @rxorn: RX FIFO overrun
40 * @txok: TX completed at the requested rate 42 * @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
55struct ath_interrupt_stats { 57struct ath_interrupt_stats {
56 u32 total; 58 u32 total;
57 u32 rxok; 59 u32 rxok;
60 u32 rxlp;
61 u32 rxhp;
58 u32 rxeol; 62 u32 rxeol;
59 u32 rxorn; 63 u32 rxorn;
60 u32 txok; 64 u32 txok;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae934148..bd9dff3293dc 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -256,14 +256,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
256{ 256{
257 int status; 257 int status;
258 258
259 if (AR_SREV_9287(ah)) { 259 if (AR_SREV_9300_20_OR_LATER(ah))
260 ah->eep_map = EEP_MAP_AR9287; 260 ah->eep_ops = &eep_ar9300_ops;
261 ah->eep_ops = &eep_AR9287_ops; 261 else if (AR_SREV_9287(ah)) {
262 ah->eep_ops = &eep_ar9287_ops;
262 } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) { 263 } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
263 ah->eep_map = EEP_MAP_4KBITS;
264 ah->eep_ops = &eep_4k_ops; 264 ah->eep_ops = &eep_4k_ops;
265 } else { 265 } else {
266 ah->eep_map = EEP_MAP_DEFAULT;
267 ah->eep_ops = &eep_def_ops; 266 ah->eep_ops = &eep_def_ops;
268 } 267 }
269 268
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b50e2f..21354c15a9a9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
19 19
20#include "../ath.h" 20#include "../ath.h"
21#include <net/cfg80211.h> 21#include <net/cfg80211.h>
22#include "ar9003_eeprom.h"
22 23
23#define AH_USE_EEPROM 0x1 24#define AH_USE_EEPROM 0x1
24 25
@@ -93,7 +94,6 @@
93 */ 94 */
94#define AR9285_RDEXT_DEFAULT 0x1F 95#define AR9285_RDEXT_DEFAULT 0x1F
95 96
96#define AR_EEPROM_MAC(i) (0x1d+(i))
97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
155#define AR5416_BCHAN_UNUSED 0xFF 155#define AR5416_BCHAN_UNUSED 0xFF
156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
157#define AR5416_MAX_CHAINS 3 157#define AR5416_MAX_CHAINS 3
158#define AR9300_MAX_CHAINS 3
158#define AR5416_PWR_TABLE_OFFSET_DB -5 159#define AR5416_PWR_TABLE_OFFSET_DB -5
159 160
160/* Rx gain type values */ 161/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
249 EEP_MINOR_REV, 250 EEP_MINOR_REV,
250 EEP_TX_MASK, 251 EEP_TX_MASK,
251 EEP_RX_MASK, 252 EEP_RX_MASK,
253 EEP_FSTCLK_5G,
252 EEP_RXGAIN_TYPE, 254 EEP_RXGAIN_TYPE,
253 EEP_TXGAIN_TYPE,
254 EEP_OL_PWRCTRL, 255 EEP_OL_PWRCTRL,
256 EEP_TXGAIN_TYPE,
255 EEP_RC_CHAIN_MASK, 257 EEP_RC_CHAIN_MASK,
256 EEP_DAC_HPWR_5G, 258 EEP_DAC_HPWR_5G,
257 EEP_FRAC_N_5G, 259 EEP_FRAC_N_5G,
258 EEP_DEV_TYPE, 260 EEP_DEV_TYPE,
259 EEP_TEMPSENSE_SLOPE, 261 EEP_TEMPSENSE_SLOPE,
260 EEP_TEMPSENSE_SLOPE_PAL_ON, 262 EEP_TEMPSENSE_SLOPE_PAL_ON,
261 EEP_PWR_TABLE_OFFSET 263 EEP_PWR_TABLE_OFFSET,
264 EEP_DRIVE_STRENGTH,
265 EEP_INTERNAL_REGULATOR,
266 EEP_SWREG
262}; 267};
263 268
264enum ar5416_rates { 269enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
295 u32 binBuildNumber; 300 u32 binBuildNumber;
296 u8 deviceType; 301 u8 deviceType;
297 u8 pwdclkind; 302 u8 pwdclkind;
298 u8 futureBase_1[2]; 303 u8 fastClk5g;
304 u8 divChain;
299 u8 rxGainType; 305 u8 rxGainType;
300 u8 dacHiPwrMode_5G; 306 u8 dacHiPwrMode_5G;
301 u8 openLoopPwrCntl; 307 u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
656 u8 iso[3]; 662 u8 iso[3];
657}; 663};
658 664
659enum ath9k_eep_map {
660 EEP_MAP_DEFAULT = 0x0,
661 EEP_MAP_4KBITS,
662 EEP_MAP_AR9287,
663 EEP_MAP_MAX
664};
665
666struct eeprom_ops { 665struct eeprom_ops {
667 int (*check_eeprom)(struct ath_hw *hw); 666 int (*check_eeprom)(struct ath_hw *hw);
668 u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param); 667 u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
713 712
714extern const struct eeprom_ops eep_def_ops; 713extern const struct eeprom_ops eep_def_ops;
715extern const struct eeprom_ops eep_4k_ops; 714extern const struct eeprom_ops eep_4k_ops;
716extern const struct eeprom_ops eep_AR9287_ops; 715extern const struct eeprom_ops eep_ar9287_ops;
716extern const struct eeprom_ops eep_ar9287_ops;
717extern const struct eeprom_ops eep_ar9300_ops;
717 718
718#endif /* EEPROM_H */ 719#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 0354fe50f8e0..41a77d1bd439 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) 20static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
20{ 21{
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
182 switch (param) { 183 switch (param) {
183 case EEP_NFTHRESH_2: 184 case EEP_NFTHRESH_2:
184 return pModal->noiseFloorThreshCh[0]; 185 return pModal->noiseFloorThreshCh[0];
185 case AR_EEPROM_MAC(0): 186 case EEP_MAC_LSW:
186 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 187 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
187 case AR_EEPROM_MAC(1): 188 case EEP_MAC_MID:
188 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 189 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
189 case AR_EEPROM_MAC(2): 190 case EEP_MAC_MSW:
190 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 191 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
191 case EEP_REG_0: 192 case EEP_REG_0:
192 return pBase->regDmn[0]; 193 return pBase->regDmn[0];
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
453 &tMinCalPower, gainBoundaries, 454 &tMinCalPower, gainBoundaries,
454 pdadcValues, numXpdGain); 455 pdadcValues, numXpdGain);
455 456
457 ENABLE_REGWRITE_BUFFER(ah);
458
456 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 459 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
457 REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, 460 REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
458 SM(pdGainOverlap_t2, 461 SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
493 496
494 regOffset += 4; 497 regOffset += 4;
495 } 498 }
499
500 REGWRITE_BUFFER_FLUSH(ah);
501 DISABLE_REGWRITE_BUFFER(ah);
496 } 502 }
497 } 503 }
498 504
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
758 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; 764 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
759 } 765 }
760 766
767 ENABLE_REGWRITE_BUFFER(ah);
768
761 /* OFDM power per rate */ 769 /* OFDM power per rate */
762 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 770 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
763 ATH9K_POW_SM(ratesArray[rate18mb], 24) 771 ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
820 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 828 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
821 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 829 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
822 } 830 }
831
832 REGWRITE_BUFFER_FLUSH(ah);
833 DISABLE_REGWRITE_BUFFER(ah);
823} 834}
824 835
825static void ath9k_hw_4k_set_addac(struct ath_hw *ah, 836static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index d8ca94c3fa0c..b471db5fb82d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) 20static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
20{ 21{
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
172 switch (param) { 173 switch (param) {
173 case EEP_NFTHRESH_2: 174 case EEP_NFTHRESH_2:
174 return pModal->noiseFloorThreshCh[0]; 175 return pModal->noiseFloorThreshCh[0];
175 case AR_EEPROM_MAC(0): 176 case EEP_MAC_LSW:
176 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 177 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
177 case AR_EEPROM_MAC(1): 178 case EEP_MAC_MID:
178 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 179 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
179 case AR_EEPROM_MAC(2): 180 case EEP_MAC_MSW:
180 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 181 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
181 case EEP_REG_0: 182 case EEP_REG_0:
182 return pBase->regDmn[0]; 183 return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
1169#undef EEP_MAP9287_SPURCHAN 1170#undef EEP_MAP9287_SPURCHAN
1170} 1171}
1171 1172
1172const struct eeprom_ops eep_AR9287_ops = { 1173const struct eeprom_ops eep_ar9287_ops = {
1173 .check_eeprom = ath9k_hw_AR9287_check_eeprom, 1174 .check_eeprom = ath9k_hw_AR9287_check_eeprom,
1174 .get_eeprom = ath9k_hw_AR9287_get_eeprom, 1175 .get_eeprom = ath9k_hw_AR9287_get_eeprom,
1175 .fill_eeprom = ath9k_hw_AR9287_fill_eeprom, 1176 .fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a0341242c..e591ad6016e5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static void ath9k_get_txgain_index(struct ath_hw *ah, 20static void ath9k_get_txgain_index(struct ath_hw *ah,
20 struct ath9k_channel *chan, 21 struct ath9k_channel *chan,
@@ -222,6 +223,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
222 return -EINVAL; 223 return -EINVAL;
223 } 224 }
224 225
226 /* Enable fixup for AR_AN_TOP2 if necessary */
227 if (AR_SREV_9280_10_OR_LATER(ah) &&
228 (eep->baseEepHeader.version & 0xff) > 0x0a &&
229 eep->baseEepHeader.pwdclkind == 0)
230 ah->need_an_top2_fixup = 1;
231
225 return 0; 232 return 0;
226} 233}
227 234
@@ -237,11 +244,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
237 return pModal[0].noiseFloorThreshCh[0]; 244 return pModal[0].noiseFloorThreshCh[0];
238 case EEP_NFTHRESH_2: 245 case EEP_NFTHRESH_2:
239 return pModal[1].noiseFloorThreshCh[0]; 246 return pModal[1].noiseFloorThreshCh[0];
240 case AR_EEPROM_MAC(0): 247 case EEP_MAC_LSW:
241 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 248 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
242 case AR_EEPROM_MAC(1): 249 case EEP_MAC_MID:
243 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 250 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
244 case AR_EEPROM_MAC(2): 251 case EEP_MAC_MSW:
245 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 252 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
246 case EEP_REG_0: 253 case EEP_REG_0:
247 return pBase->regDmn[0]; 254 return pBase->regDmn[0];
@@ -267,6 +274,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
267 return pBase->txMask; 274 return pBase->txMask;
268 case EEP_RX_MASK: 275 case EEP_RX_MASK:
269 return pBase->rxMask; 276 return pBase->rxMask;
277 case EEP_FSTCLK_5G:
278 return pBase->fastClk5g;
270 case EEP_RXGAIN_TYPE: 279 case EEP_RXGAIN_TYPE:
271 return pBase->rxGainType; 280 return pBase->rxGainType;
272 case EEP_TXGAIN_TYPE: 281 case EEP_TXGAIN_TYPE:
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index fe994e229898..74872ca76f9a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -93,14 +93,24 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
93 return ret; 93 return ret;
94} 94}
95 95
96static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
97 struct sk_buff_head *list)
98{
99 struct sk_buff *skb;
100
101 while ((skb = __skb_dequeue(list)) != NULL) {
102 dev_kfree_skb_any(skb);
103 TX_STAT_INC(skb_dropped);
104 }
105}
106
96static void hif_usb_tx_cb(struct urb *urb) 107static void hif_usb_tx_cb(struct urb *urb)
97{ 108{
98 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
99 struct hif_device_usb *hif_dev = tx_buf->hif_dev; 110 struct hif_device_usb *hif_dev = tx_buf->hif_dev;
100 struct sk_buff *skb; 111 struct sk_buff *skb;
101 bool drop, flush;
102 112
103 if (!hif_dev) 113 if (!hif_dev || !tx_buf)
104 return; 114 return;
105 115
106 switch (urb->status) { 116 switch (urb->status) {
@@ -108,52 +118,47 @@ static void hif_usb_tx_cb(struct urb *urb)
108 break; 118 break;
109 case -ENOENT: 119 case -ENOENT:
110 case -ECONNRESET: 120 case -ECONNRESET:
111 break;
112 case -ENODEV: 121 case -ENODEV:
113 case -ESHUTDOWN: 122 case -ESHUTDOWN:
123 /*
124 * The URB has been killed, free the SKBs
125 * and return.
126 */
127 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
114 return; 128 return;
115 default: 129 default:
116 break; 130 break;
117 } 131 }
118 132
119 if (tx_buf) { 133 /* Check if TX has been stopped */
120 spin_lock(&hif_dev->tx.tx_lock); 134 spin_lock(&hif_dev->tx.tx_lock);
121 drop = !!(hif_dev->tx.flags & HIF_USB_TX_STOP); 135 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
122 flush = !!(hif_dev->tx.flags & HIF_USB_TX_FLUSH);
123 spin_unlock(&hif_dev->tx.tx_lock);
124
125 while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
126 if (!drop && !flush) {
127 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
128 skb, 1);
129 TX_STAT_INC(skb_completed);
130 } else {
131 dev_kfree_skb_any(skb);
132 }
133 }
134
135 if (flush)
136 return;
137
138 tx_buf->len = tx_buf->offset = 0;
139 __skb_queue_head_init(&tx_buf->skb_queue);
140
141 spin_lock(&hif_dev->tx.tx_lock);
142 list_del(&tx_buf->list);
143 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
144 hif_dev->tx.tx_buf_cnt++;
145 if (!drop)
146 __hif_usb_tx(hif_dev); /* Check for pending SKBs */
147 TX_STAT_INC(buf_completed);
148 spin_unlock(&hif_dev->tx.tx_lock); 136 spin_unlock(&hif_dev->tx.tx_lock);
149 } 137 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
150} 138 goto add_free;
151 139 }
152static inline void ath9k_skb_queue_purge(struct sk_buff_head *list) 140 spin_unlock(&hif_dev->tx.tx_lock);
153{ 141
154 struct sk_buff *skb; 142 /* Complete the queued SKBs. */
155 while ((skb = __skb_dequeue(list)) != NULL) 143 while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
156 dev_kfree_skb_any(skb); 144 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
145 skb, 1);
146 TX_STAT_INC(skb_completed);
147 }
148
149add_free:
150 /* Re-initialize the SKB queue */
151 tx_buf->len = tx_buf->offset = 0;
152 __skb_queue_head_init(&tx_buf->skb_queue);
153
154 /* Add this TX buffer to the free list */
155 spin_lock(&hif_dev->tx.tx_lock);
156 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
157 hif_dev->tx.tx_buf_cnt++;
158 if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
159 __hif_usb_tx(hif_dev); /* Check for pending SKBs */
160 TX_STAT_INC(buf_completed);
161 spin_unlock(&hif_dev->tx.tx_lock);
157} 162}
158 163
159/* TX lock has to be taken */ 164/* TX lock has to be taken */
@@ -173,8 +178,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
173 return 0; 178 return 0;
174 179
175 tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list); 180 tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
176 list_del(&tx_buf->list); 181 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
177 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
178 hif_dev->tx.tx_buf_cnt--; 182 hif_dev->tx.tx_buf_cnt--;
179 183
180 tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM); 184 tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
@@ -214,7 +218,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
214 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC); 218 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
215 if (ret) { 219 if (ret) {
216 tx_buf->len = tx_buf->offset = 0; 220 tx_buf->len = tx_buf->offset = 0;
217 ath9k_skb_queue_purge(&tx_buf->skb_queue); 221 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
218 __skb_queue_head_init(&tx_buf->skb_queue); 222 __skb_queue_head_init(&tx_buf->skb_queue);
219 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); 223 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
220 hif_dev->tx.tx_buf_cnt++; 224 hif_dev->tx.tx_buf_cnt++;
@@ -281,7 +285,7 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
281 unsigned long flags; 285 unsigned long flags;
282 286
283 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 287 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
284 ath9k_skb_queue_purge(&hif_dev->tx.tx_skb_queue); 288 ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
285 hif_dev->tx.tx_skb_cnt = 0; 289 hif_dev->tx.tx_skb_cnt = 0;
286 hif_dev->tx.flags |= HIF_USB_TX_STOP; 290 hif_dev->tx.flags |= HIF_USB_TX_STOP;
287 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 291 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
@@ -506,9 +510,18 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
506 if (likely(urb->actual_length != 0)) { 510 if (likely(urb->actual_length != 0)) {
507 skb_put(skb, urb->actual_length); 511 skb_put(skb, urb->actual_length);
508 512
513 /* Process the command first */
514 ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
515 skb->len, USB_REG_IN_PIPE);
516
517
509 nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); 518 nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
510 if (!nskb) 519 if (!nskb) {
511 goto resubmit; 520 dev_err(&hif_dev->udev->dev,
521 "ath9k_htc: REG_IN memory allocation failure\n");
522 urb->context = NULL;
523 return;
524 }
512 525
513 usb_fill_int_urb(urb, hif_dev->udev, 526 usb_fill_int_urb(urb, hif_dev->udev,
514 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), 527 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
@@ -518,12 +531,9 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
518 ret = usb_submit_urb(urb, GFP_ATOMIC); 531 ret = usb_submit_urb(urb, GFP_ATOMIC);
519 if (ret) { 532 if (ret) {
520 kfree_skb(nskb); 533 kfree_skb(nskb);
521 goto free; 534 urb->context = NULL;
522 } 535 }
523 536
524 ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
525 skb->len, USB_REG_IN_PIPE);
526
527 return; 537 return;
528 } 538 }
529 539
@@ -543,20 +553,17 @@ free:
543 553
544static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) 554static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
545{ 555{
546 unsigned long flags;
547 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; 556 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
548 557
549 list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { 558 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
559 &hif_dev->tx.tx_buf, list) {
560 usb_kill_urb(tx_buf->urb);
550 list_del(&tx_buf->list); 561 list_del(&tx_buf->list);
551 usb_free_urb(tx_buf->urb); 562 usb_free_urb(tx_buf->urb);
552 kfree(tx_buf->buf); 563 kfree(tx_buf->buf);
553 kfree(tx_buf); 564 kfree(tx_buf);
554 } 565 }
555 566
556 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
557 hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
558 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
559
560 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 567 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
561 &hif_dev->tx.tx_pending, list) { 568 &hif_dev->tx.tx_pending, list) {
562 usb_kill_urb(tx_buf->urb); 569 usb_kill_urb(tx_buf->urb);
@@ -565,10 +572,6 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
565 kfree(tx_buf->buf); 572 kfree(tx_buf->buf);
566 kfree(tx_buf); 573 kfree(tx_buf);
567 } 574 }
568
569 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
570 hif_dev->tx.flags &= ~HIF_USB_TX_FLUSH;
571 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
572} 575}
573 576
574static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) 577static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
@@ -892,6 +895,26 @@ err_alloc:
892 return ret; 895 return ret;
893} 896}
894 897
898static void ath9k_hif_usb_reboot(struct usb_device *udev)
899{
900 u32 reboot_cmd = 0xffffffff;
901 void *buf;
902 int ret;
903
904 buf = kmalloc(4, GFP_KERNEL);
905 if (!buf)
906 return;
907
908 memcpy(buf, &reboot_cmd, 4);
909
910 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
911 buf, 4, NULL, HZ);
912 if (ret)
913 dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
914
915 kfree(buf);
916}
917
895static void ath9k_hif_usb_disconnect(struct usb_interface *interface) 918static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
896{ 919{
897 struct usb_device *udev = interface_to_usbdev(interface); 920 struct usb_device *udev = interface_to_usbdev(interface);
@@ -899,14 +922,15 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
899 (struct hif_device_usb *) usb_get_intfdata(interface); 922 (struct hif_device_usb *) usb_get_intfdata(interface);
900 923
901 if (hif_dev) { 924 if (hif_dev) {
902 ath9k_htc_hw_deinit(hif_dev->htc_handle, true); 925 ath9k_htc_hw_deinit(hif_dev->htc_handle,
926 (udev->state == USB_STATE_NOTATTACHED) ? true : false);
903 ath9k_htc_hw_free(hif_dev->htc_handle); 927 ath9k_htc_hw_free(hif_dev->htc_handle);
904 ath9k_hif_usb_dev_deinit(hif_dev); 928 ath9k_hif_usb_dev_deinit(hif_dev);
905 usb_set_intfdata(interface, NULL); 929 usb_set_intfdata(interface, NULL);
906 } 930 }
907 931
908 if (hif_dev->flags & HIF_USB_START) 932 if (hif_dev->flags & HIF_USB_START)
909 usb_reset_device(udev); 933 ath9k_hif_usb_reboot(udev);
910 934
911 kfree(hif_dev); 935 kfree(hif_dev);
912 dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n"); 936 dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7d49a8af420e..0aca49b6fcb6 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -61,7 +61,6 @@ struct tx_buf {
61}; 61};
62 62
63#define HIF_USB_TX_STOP BIT(0) 63#define HIF_USB_TX_STOP BIT(0)
64#define HIF_USB_TX_FLUSH BIT(1)
65 64
66struct hif_usb_tx { 65struct hif_usb_tx {
67 u8 flags; 66 u8 flags;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 78213fc71b09..1ae18bbc4d9e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -124,13 +124,13 @@ struct ath9k_htc_cap_target {
124struct ath9k_htc_target_vif { 124struct ath9k_htc_target_vif {
125 u8 index; 125 u8 index;
126 u8 des_bssid[ETH_ALEN]; 126 u8 des_bssid[ETH_ALEN];
127 enum htc_opmode opmode; 127 __be32 opmode;
128 u8 myaddr[ETH_ALEN]; 128 u8 myaddr[ETH_ALEN];
129 u8 bssid[ETH_ALEN]; 129 u8 bssid[ETH_ALEN];
130 u32 flags; 130 u32 flags;
131 u32 flags_ext; 131 u32 flags_ext;
132 u16 ps_sta; 132 u16 ps_sta;
133 u16 rtsthreshold; 133 __be16 rtsthreshold;
134 u8 ath_cap; 134 u8 ath_cap;
135 u8 node; 135 u8 node;
136 s8 mcast_rate; 136 s8 mcast_rate;
@@ -151,7 +151,7 @@ struct ath9k_htc_target_sta {
151 u8 sta_index; 151 u8 sta_index;
152 u8 vif_index; 152 u8 vif_index;
153 u8 vif_sta; 153 u8 vif_sta;
154 u16 flags; /* ATH_HTC_STA_* */ 154 __be16 flags; /* ATH_HTC_STA_* */
155 u16 htcap; 155 u16 htcap;
156 u8 valid; 156 u8 valid;
157 u16 capinfo; 157 u16 capinfo;
@@ -191,16 +191,16 @@ struct ath9k_htc_rate {
191struct ath9k_htc_target_rate { 191struct ath9k_htc_target_rate {
192 u8 sta_index; 192 u8 sta_index;
193 u8 isnew; 193 u8 isnew;
194 u32 capflags; 194 __be32 capflags;
195 struct ath9k_htc_rate rates; 195 struct ath9k_htc_rate rates;
196}; 196};
197 197
198struct ath9k_htc_target_stats { 198struct ath9k_htc_target_stats {
199 u32 tx_shortretry; 199 __be32 tx_shortretry;
200 u32 tx_longretry; 200 __be32 tx_longretry;
201 u32 tx_xretries; 201 __be32 tx_xretries;
202 u32 ht_txunaggr_xretry; 202 __be32 ht_txunaggr_xretry;
203 u32 ht_tx_xretries; 203 __be32 ht_tx_xretries;
204} __packed; 204} __packed;
205 205
206struct ath9k_htc_vif { 206struct ath9k_htc_vif {
@@ -261,6 +261,7 @@ struct ath_tx_stats {
261 u32 buf_completed; 261 u32 buf_completed;
262 u32 skb_queued; 262 u32 skb_queued;
263 u32 skb_completed; 263 u32 skb_completed;
264 u32 skb_dropped;
264}; 265};
265 266
266struct ath_rx_stats { 267struct ath_rx_stats {
@@ -328,6 +329,7 @@ struct htc_beacon_config {
328#define OP_ASSOCIATED BIT(8) 329#define OP_ASSOCIATED BIT(8)
329#define OP_ENABLE_BEACON BIT(9) 330#define OP_ENABLE_BEACON BIT(9)
330#define OP_LED_DEINIT BIT(10) 331#define OP_LED_DEINIT BIT(10)
332#define OP_UNPLUGGED BIT(11)
331 333
332struct ath9k_htc_priv { 334struct ath9k_htc_priv {
333 struct device *dev; 335 struct device *dev;
@@ -377,6 +379,7 @@ struct ath9k_htc_priv {
377 struct mutex htc_pm_lock; 379 struct mutex htc_pm_lock;
378 unsigned long ps_usecount; 380 unsigned long ps_usecount;
379 bool ps_enabled; 381 bool ps_enabled;
382 bool ps_idle;
380 383
381 struct ath_led radio_led; 384 struct ath_led radio_led;
382 struct ath_led assoc_led; 385 struct ath_led assoc_led;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 5e21f4d92ff5..7cb55f5b071c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -26,7 +26,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
26 enum ath9k_int imask = 0; 26 enum ath9k_int imask = 0;
27 int dtimperiod, dtimcount, sleepduration; 27 int dtimperiod, dtimcount, sleepduration;
28 int cfpperiod, cfpcount, bmiss_timeout; 28 int cfpperiod, cfpcount, bmiss_timeout;
29 u32 nexttbtt = 0, intval, tsftu, htc_imask = 0; 29 u32 nexttbtt = 0, intval, tsftu;
30 __be32 htc_imask = 0;
30 u64 tsf; 31 u64 tsf;
31 int num_beacons, offset, dtim_dec_count, cfp_dec_count; 32 int num_beacons, offset, dtim_dec_count, cfp_dec_count;
32 int ret; 33 int ret;
@@ -142,7 +143,8 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
142{ 143{
143 struct ath_common *common = ath9k_hw_common(priv->ah); 144 struct ath_common *common = ath9k_hw_common(priv->ah);
144 enum ath9k_int imask = 0; 145 enum ath9k_int imask = 0;
145 u32 nexttbtt, intval, htc_imask = 0; 146 u32 nexttbtt, intval;
147 __be32 htc_imask = 0;
146 int ret; 148 int ret;
147 u8 cmd_rsp; 149 u8 cmd_rsp;
148 150
@@ -244,25 +246,20 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
244 struct ieee80211_vif *vif) 246 struct ieee80211_vif *vif)
245{ 247{
246 struct ath_common *common = ath9k_hw_common(priv->ah); 248 struct ath_common *common = ath9k_hw_common(priv->ah);
247 enum nl80211_iftype iftype;
248 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; 249 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
250 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
249 251
250 if (vif) { 252 cur_conf->beacon_interval = bss_conf->beacon_int;
251 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
252 iftype = vif->type;
253 cur_conf->beacon_interval = bss_conf->beacon_int;
254 cur_conf->dtim_period = bss_conf->dtim_period;
255 cur_conf->listen_interval = 1;
256 cur_conf->dtim_count = 1;
257 cur_conf->bmiss_timeout =
258 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
259 } else
260 iftype = priv->ah->opmode;
261
262 if (cur_conf->beacon_interval == 0) 253 if (cur_conf->beacon_interval == 0)
263 cur_conf->beacon_interval = 100; 254 cur_conf->beacon_interval = 100;
264 255
265 switch (iftype) { 256 cur_conf->dtim_period = bss_conf->dtim_period;
257 cur_conf->listen_interval = 1;
258 cur_conf->dtim_count = 1;
259 cur_conf->bmiss_timeout =
260 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
261
262 switch (vif->type) {
266 case NL80211_IFTYPE_STATION: 263 case NL80211_IFTYPE_STATION:
267 ath9k_htc_beacon_config_sta(priv, cur_conf); 264 ath9k_htc_beacon_config_sta(priv, cur_conf);
268 break; 265 break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index aed53573c547..701f2ef5a440 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -213,7 +213,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
213 ath9k_hw_regulatory(priv->ah)); 213 ath9k_hw_regulatory(priv->ah));
214} 214}
215 215
216static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset) 216static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
217{ 217{
218 struct ath_hw *ah = (struct ath_hw *) hw_priv; 218 struct ath_hw *ah = (struct ath_hw *) hw_priv;
219 struct ath_common *common = ath9k_hw_common(ah); 219 struct ath_common *common = ath9k_hw_common(ah);
@@ -235,7 +235,7 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
235 return be32_to_cpu(val); 235 return be32_to_cpu(val);
236} 236}
237 237
238static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) 238static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
239{ 239{
240 struct ath_hw *ah = (struct ath_hw *) hw_priv; 240 struct ath_hw *ah = (struct ath_hw *) hw_priv;
241 struct ath_common *common = ath9k_hw_common(ah); 241 struct ath_common *common = ath9k_hw_common(ah);
@@ -257,9 +257,105 @@ static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
257 } 257 }
258} 258}
259 259
260static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
261{
262 struct ath_hw *ah = (struct ath_hw *) hw_priv;
263 struct ath_common *common = ath9k_hw_common(ah);
264 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
265 u32 rsp_status;
266 int r;
267
268 mutex_lock(&priv->wmi->multi_write_mutex);
269
270 /* Store the register/value */
271 priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
272 cpu_to_be32(reg_offset);
273 priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
274 cpu_to_be32(val);
275
276 priv->wmi->multi_write_idx++;
277
278 /* If the buffer is full, send it out. */
279 if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
280 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
281 (u8 *) &priv->wmi->multi_write,
282 sizeof(struct register_write) * priv->wmi->multi_write_idx,
283 (u8 *) &rsp_status, sizeof(rsp_status),
284 100);
285 if (unlikely(r)) {
286 ath_print(common, ATH_DBG_WMI,
287 "REGISTER WRITE FAILED, multi len: %d\n",
288 priv->wmi->multi_write_idx);
289 }
290 priv->wmi->multi_write_idx = 0;
291 }
292
293 mutex_unlock(&priv->wmi->multi_write_mutex);
294}
295
296static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
297{
298 struct ath_hw *ah = (struct ath_hw *) hw_priv;
299 struct ath_common *common = ath9k_hw_common(ah);
300 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
301
302 if (atomic_read(&priv->wmi->mwrite_cnt))
303 ath9k_regwrite_buffer(hw_priv, val, reg_offset);
304 else
305 ath9k_regwrite_single(hw_priv, val, reg_offset);
306}
307
308static void ath9k_enable_regwrite_buffer(void *hw_priv)
309{
310 struct ath_hw *ah = (struct ath_hw *) hw_priv;
311 struct ath_common *common = ath9k_hw_common(ah);
312 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
313
314 atomic_inc(&priv->wmi->mwrite_cnt);
315}
316
317static void ath9k_disable_regwrite_buffer(void *hw_priv)
318{
319 struct ath_hw *ah = (struct ath_hw *) hw_priv;
320 struct ath_common *common = ath9k_hw_common(ah);
321 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
322
323 atomic_dec(&priv->wmi->mwrite_cnt);
324}
325
326static void ath9k_regwrite_flush(void *hw_priv)
327{
328 struct ath_hw *ah = (struct ath_hw *) hw_priv;
329 struct ath_common *common = ath9k_hw_common(ah);
330 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
331 u32 rsp_status;
332 int r;
333
334 mutex_lock(&priv->wmi->multi_write_mutex);
335
336 if (priv->wmi->multi_write_idx) {
337 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
338 (u8 *) &priv->wmi->multi_write,
339 sizeof(struct register_write) * priv->wmi->multi_write_idx,
340 (u8 *) &rsp_status, sizeof(rsp_status),
341 100);
342 if (unlikely(r)) {
343 ath_print(common, ATH_DBG_WMI,
344 "REGISTER WRITE FAILED, multi len: %d\n",
345 priv->wmi->multi_write_idx);
346 }
347 priv->wmi->multi_write_idx = 0;
348 }
349
350 mutex_unlock(&priv->wmi->multi_write_mutex);
351}
352
260static const struct ath_ops ath9k_common_ops = { 353static const struct ath_ops ath9k_common_ops = {
261 .read = ath9k_ioread32, 354 .read = ath9k_regread,
262 .write = ath9k_iowrite32, 355 .write = ath9k_regwrite,
356 .enable_write_buffer = ath9k_enable_regwrite_buffer,
357 .disable_write_buffer = ath9k_disable_regwrite_buffer,
358 .write_flush = ath9k_regwrite_flush,
263}; 359};
264 360
265static void ath_usb_read_cachesize(struct ath_common *common, int *csz) 361static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
@@ -648,6 +744,9 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
648 if (ret) 744 if (ret)
649 goto err_init; 745 goto err_init;
650 746
747 /* The device may have been unplugged earlier. */
748 priv->op_flags &= ~OP_UNPLUGGED;
749
651 ret = ath9k_init_device(priv, devid); 750 ret = ath9k_init_device(priv, devid);
652 if (ret) 751 if (ret)
653 goto err_init; 752 goto err_init;
@@ -664,6 +763,11 @@ err_free:
664void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) 763void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
665{ 764{
666 if (htc_handle->drv_priv) { 765 if (htc_handle->drv_priv) {
766
767 /* Check if the device has been yanked out. */
768 if (hotunplug)
769 htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
770
667 ath9k_deinit_device(htc_handle->drv_priv); 771 ath9k_deinit_device(htc_handle->drv_priv);
668 ath9k_deinit_wmi(htc_handle->drv_priv); 772 ath9k_deinit_wmi(htc_handle->drv_priv);
669 ieee80211_free_hw(htc_handle->drv_priv->hw); 773 ieee80211_free_hw(htc_handle->drv_priv->hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index eb7722b2cfcc..ca7f3a78eb11 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -94,8 +94,11 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
94 if (--priv->ps_usecount != 0) 94 if (--priv->ps_usecount != 0)
95 goto unlock; 95 goto unlock;
96 96
97 if (priv->ps_enabled) 97 if (priv->ps_idle)
98 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
99 else if (priv->ps_enabled)
98 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP); 100 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
101
99unlock: 102unlock:
100 mutex_unlock(&priv->htc_pm_lock); 103 mutex_unlock(&priv->htc_pm_lock);
101} 104}
@@ -125,7 +128,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
125 bool fastcc = true; 128 bool fastcc = true;
126 struct ieee80211_channel *channel = hw->conf.channel; 129 struct ieee80211_channel *channel = hw->conf.channel;
127 enum htc_phymode mode; 130 enum htc_phymode mode;
128 u16 htc_mode; 131 __be16 htc_mode;
129 u8 cmd_rsp; 132 u8 cmd_rsp;
130 int ret; 133 int ret;
131 134
@@ -153,7 +156,6 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
153 ath_print(common, ATH_DBG_FATAL, 156 ath_print(common, ATH_DBG_FATAL,
154 "Unable to reset channel (%u Mhz) " 157 "Unable to reset channel (%u Mhz) "
155 "reset status %d\n", channel->center_freq, ret); 158 "reset status %d\n", channel->center_freq, ret);
156 ath9k_htc_ps_restore(priv);
157 goto err; 159 goto err;
158 } 160 }
159 161
@@ -378,7 +380,7 @@ static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
378 priv->tgt_rate.sta_index = ista->index; 380 priv->tgt_rate.sta_index = ista->index;
379 priv->tgt_rate.isnew = 1; 381 priv->tgt_rate.isnew = 1;
380 trate = priv->tgt_rate; 382 trate = priv->tgt_rate;
381 priv->tgt_rate.capflags = caps; 383 priv->tgt_rate.capflags = cpu_to_be32(caps);
382 trate.capflags = cpu_to_be32(caps); 384 trate.capflags = cpu_to_be32(caps);
383 385
384 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); 386 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
@@ -426,6 +428,7 @@ static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
426 struct ath9k_htc_target_rate trate; 428 struct ath9k_htc_target_rate trate;
427 struct ath_common *common = ath9k_hw_common(priv->ah); 429 struct ath_common *common = ath9k_hw_common(priv->ah);
428 int ret; 430 int ret;
431 u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
429 u8 cmd_rsp; 432 u8 cmd_rsp;
430 433
431 memset(&trate, 0, sizeof(trate)); 434 memset(&trate, 0, sizeof(trate));
@@ -433,11 +436,12 @@ static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
433 trate = priv->tgt_rate; 436 trate = priv->tgt_rate;
434 437
435 if (is_cw40) 438 if (is_cw40)
436 priv->tgt_rate.capflags |= WLAN_RC_40_FLAG; 439 caps |= WLAN_RC_40_FLAG;
437 else 440 else
438 priv->tgt_rate.capflags &= ~WLAN_RC_40_FLAG; 441 caps &= ~WLAN_RC_40_FLAG;
439 442
440 trate.capflags = cpu_to_be32(priv->tgt_rate.capflags); 443 priv->tgt_rate.capflags = cpu_to_be32(caps);
444 trate.capflags = cpu_to_be32(caps);
441 445
442 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); 446 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
443 if (ret) { 447 if (ret) {
@@ -609,6 +613,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
609 len += snprintf(buf + len, sizeof(buf) - len, 613 len += snprintf(buf + len, sizeof(buf) - len,
610 "%20s : %10u\n", "SKBs completed", 614 "%20s : %10u\n", "SKBs completed",
611 priv->debug.tx_stats.skb_completed); 615 priv->debug.tx_stats.skb_completed);
616 len += snprintf(buf + len, sizeof(buf) - len,
617 "%20s : %10u\n", "SKBs dropped",
618 priv->debug.tx_stats.skb_dropped);
612 619
613 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 620 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
614} 621}
@@ -960,7 +967,6 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
960 ath9k_unregister_led(&priv->tx_led); 967 ath9k_unregister_led(&priv->tx_led);
961 ath9k_unregister_led(&priv->rx_led); 968 ath9k_unregister_led(&priv->rx_led);
962 ath9k_unregister_led(&priv->radio_led); 969 ath9k_unregister_led(&priv->radio_led);
963 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
964} 970}
965 971
966void ath9k_init_leds(struct ath9k_htc_priv *priv) 972void ath9k_init_leds(struct ath9k_htc_priv *priv)
@@ -1093,7 +1099,7 @@ fail_tx:
1093 return 0; 1099 return 0;
1094} 1100}
1095 1101
1096static int ath9k_htc_start(struct ieee80211_hw *hw) 1102static int ath9k_htc_radio_enable(struct ieee80211_hw *hw)
1097{ 1103{
1098 struct ath9k_htc_priv *priv = hw->priv; 1104 struct ath9k_htc_priv *priv = hw->priv;
1099 struct ath_hw *ah = priv->ah; 1105 struct ath_hw *ah = priv->ah;
@@ -1102,15 +1108,13 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1102 struct ath9k_channel *init_channel; 1108 struct ath9k_channel *init_channel;
1103 int ret = 0; 1109 int ret = 0;
1104 enum htc_phymode mode; 1110 enum htc_phymode mode;
1105 u16 htc_mode; 1111 __be16 htc_mode;
1106 u8 cmd_rsp; 1112 u8 cmd_rsp;
1107 1113
1108 ath_print(common, ATH_DBG_CONFIG, 1114 ath_print(common, ATH_DBG_CONFIG,
1109 "Starting driver with initial channel: %d MHz\n", 1115 "Starting driver with initial channel: %d MHz\n",
1110 curchan->center_freq); 1116 curchan->center_freq);
1111 1117
1112 mutex_lock(&priv->mutex);
1113
1114 /* setup initial channel */ 1118 /* setup initial channel */
1115 init_channel = ath9k_cmn_get_curchannel(hw, ah); 1119 init_channel = ath9k_cmn_get_curchannel(hw, ah);
1116 1120
@@ -1123,7 +1127,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1123 ath_print(common, ATH_DBG_FATAL, 1127 ath_print(common, ATH_DBG_FATAL,
1124 "Unable to reset hardware; reset status %d " 1128 "Unable to reset hardware; reset status %d "
1125 "(freq %u MHz)\n", ret, curchan->center_freq); 1129 "(freq %u MHz)\n", ret, curchan->center_freq);
1126 goto mutex_unlock; 1130 return ret;
1127 } 1131 }
1128 1132
1129 ath_update_txpow(priv); 1133 ath_update_txpow(priv);
@@ -1131,16 +1135,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1131 mode = ath9k_htc_get_curmode(priv, init_channel); 1135 mode = ath9k_htc_get_curmode(priv, init_channel);
1132 htc_mode = cpu_to_be16(mode); 1136 htc_mode = cpu_to_be16(mode);
1133 WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode); 1137 WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
1134 if (ret)
1135 goto mutex_unlock;
1136
1137 WMI_CMD(WMI_ATH_INIT_CMDID); 1138 WMI_CMD(WMI_ATH_INIT_CMDID);
1138 if (ret)
1139 goto mutex_unlock;
1140
1141 WMI_CMD(WMI_START_RECV_CMDID); 1139 WMI_CMD(WMI_START_RECV_CMDID);
1142 if (ret)
1143 goto mutex_unlock;
1144 1140
1145 ath9k_host_rx_init(priv); 1141 ath9k_host_rx_init(priv);
1146 1142
@@ -1153,12 +1149,22 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1153 1149
1154 ieee80211_wake_queues(hw); 1150 ieee80211_wake_queues(hw);
1155 1151
1156mutex_unlock: 1152 return ret;
1153}
1154
1155static int ath9k_htc_start(struct ieee80211_hw *hw)
1156{
1157 struct ath9k_htc_priv *priv = hw->priv;
1158 int ret = 0;
1159
1160 mutex_lock(&priv->mutex);
1161 ret = ath9k_htc_radio_enable(hw);
1157 mutex_unlock(&priv->mutex); 1162 mutex_unlock(&priv->mutex);
1163
1158 return ret; 1164 return ret;
1159} 1165}
1160 1166
1161static void ath9k_htc_stop(struct ieee80211_hw *hw) 1167static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
1162{ 1168{
1163 struct ath9k_htc_priv *priv = hw->priv; 1169 struct ath9k_htc_priv *priv = hw->priv;
1164 struct ath_hw *ah = priv->ah; 1170 struct ath_hw *ah = priv->ah;
@@ -1166,14 +1172,18 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1166 int ret = 0; 1172 int ret = 0;
1167 u8 cmd_rsp; 1173 u8 cmd_rsp;
1168 1174
1169 mutex_lock(&priv->mutex);
1170
1171 if (priv->op_flags & OP_INVALID) { 1175 if (priv->op_flags & OP_INVALID) {
1172 ath_print(common, ATH_DBG_ANY, "Device not present\n"); 1176 ath_print(common, ATH_DBG_ANY, "Device not present\n");
1173 mutex_unlock(&priv->mutex);
1174 return; 1177 return;
1175 } 1178 }
1176 1179
1180 /* Cancel all the running timers/work .. */
1181 cancel_work_sync(&priv->ps_work);
1182 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1183 cancel_delayed_work_sync(&priv->ath9k_aggr_work);
1184 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1185 ath9k_led_stop_brightness(priv);
1186
1177 ath9k_htc_ps_wakeup(priv); 1187 ath9k_htc_ps_wakeup(priv);
1178 htc_stop(priv->htc); 1188 htc_stop(priv->htc);
1179 WMI_CMD(WMI_DISABLE_INTR_CMDID); 1189 WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -1185,11 +1195,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1185 ath9k_htc_ps_restore(priv); 1195 ath9k_htc_ps_restore(priv);
1186 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); 1196 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1187 1197
1188 cancel_work_sync(&priv->ps_work);
1189 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1190 cancel_delayed_work_sync(&priv->ath9k_aggr_work);
1191 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1192 ath9k_led_stop_brightness(priv);
1193 skb_queue_purge(&priv->tx_queue); 1198 skb_queue_purge(&priv->tx_queue);
1194 1199
1195 /* Remove monitor interface here */ 1200 /* Remove monitor interface here */
@@ -1203,11 +1208,20 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1203 } 1208 }
1204 1209
1205 priv->op_flags |= OP_INVALID; 1210 priv->op_flags |= OP_INVALID;
1206 mutex_unlock(&priv->mutex);
1207 1211
1208 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); 1212 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
1209} 1213}
1210 1214
1215static void ath9k_htc_stop(struct ieee80211_hw *hw)
1216{
1217 struct ath9k_htc_priv *priv = hw->priv;
1218
1219 mutex_lock(&priv->mutex);
1220 ath9k_htc_radio_disable(hw);
1221 mutex_unlock(&priv->mutex);
1222}
1223
1224
1211static int ath9k_htc_add_interface(struct ieee80211_hw *hw, 1225static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1212 struct ieee80211_vif *vif) 1226 struct ieee80211_vif *vif)
1213{ 1227{
@@ -1321,6 +1335,23 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1321 1335
1322 mutex_lock(&priv->mutex); 1336 mutex_lock(&priv->mutex);
1323 1337
1338 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1339 bool enable_radio = false;
1340 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1341
1342 if (!idle && priv->ps_idle)
1343 enable_radio = true;
1344
1345 priv->ps_idle = idle;
1346
1347 if (enable_radio) {
1348 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
1349 ath9k_htc_radio_enable(hw);
1350 ath_print(common, ATH_DBG_CONFIG,
1351 "not-idle: enabling radio\n");
1352 }
1353 }
1354
1324 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1355 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1325 struct ieee80211_channel *curchan = hw->conf.channel; 1356 struct ieee80211_channel *curchan = hw->conf.channel;
1326 int pos = curchan->hw_value; 1357 int pos = curchan->hw_value;
@@ -1364,6 +1395,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1364 } 1395 }
1365 } 1396 }
1366 1397
1398 if (priv->ps_idle) {
1399 ath_print(common, ATH_DBG_CONFIG,
1400 "idle: disabling radio\n");
1401 ath9k_htc_radio_disable(hw);
1402 }
1403
1404
1367 mutex_unlock(&priv->mutex); 1405 mutex_unlock(&priv->mutex);
1368 1406
1369 return 0; 1407 return 0;
@@ -1687,7 +1725,7 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1687 spin_unlock_bh(&priv->beacon_lock); 1725 spin_unlock_bh(&priv->beacon_lock);
1688 priv->op_flags |= OP_FULL_RESET; 1726 priv->op_flags |= OP_FULL_RESET;
1689 if (priv->op_flags & OP_ASSOCIATED) 1727 if (priv->op_flags & OP_ASSOCIATED)
1690 ath9k_htc_beacon_config(priv, NULL); 1728 ath9k_htc_beacon_config(priv, priv->vif);
1691 ath_start_ani(priv); 1729 ath_start_ani(priv);
1692 mutex_unlock(&priv->mutex); 1730 mutex_unlock(&priv->mutex);
1693 ath9k_htc_ps_restore(priv); 1731 ath9k_htc_ps_restore(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 0a7cb30af5b4..28abc7d5e909 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -244,16 +244,25 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
244 enum htc_endpoint_id ep_id, bool txok) 244 enum htc_endpoint_id ep_id, bool txok)
245{ 245{
246 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv; 246 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
247 struct ath_common *common = ath9k_hw_common(priv->ah);
247 struct ieee80211_tx_info *tx_info; 248 struct ieee80211_tx_info *tx_info;
248 249
249 if (!skb) 250 if (!skb)
250 return; 251 return;
251 252
252 if (ep_id == priv->mgmt_ep) 253 if (ep_id == priv->mgmt_ep) {
253 skb_pull(skb, sizeof(struct tx_mgmt_hdr)); 254 skb_pull(skb, sizeof(struct tx_mgmt_hdr));
254 else 255 } else if ((ep_id == priv->data_bk_ep) ||
255 /* TODO: Check for cab/uapsd/data */ 256 (ep_id == priv->data_be_ep) ||
257 (ep_id == priv->data_vi_ep) ||
258 (ep_id == priv->data_vo_ep)) {
256 skb_pull(skb, sizeof(struct tx_frame_hdr)); 259 skb_pull(skb, sizeof(struct tx_frame_hdr));
260 } else {
261 ath_print(common, ATH_DBG_FATAL,
262 "Unsupported TX EPID: %d\n", ep_id);
263 dev_kfree_skb_any(skb);
264 return;
265 }
257 266
258 tx_info = IEEE80211_SKB_CB(skb); 267 tx_info = IEEE80211_SKB_CB(skb);
259 268
@@ -439,10 +448,32 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
439 struct ieee80211_hw *hw = priv->hw; 448 struct ieee80211_hw *hw = priv->hw;
440 struct sk_buff *skb = rxbuf->skb; 449 struct sk_buff *skb = rxbuf->skb;
441 struct ath_common *common = ath9k_hw_common(priv->ah); 450 struct ath_common *common = ath9k_hw_common(priv->ah);
451 struct ath_htc_rx_status *rxstatus;
442 int hdrlen, padpos, padsize; 452 int hdrlen, padpos, padsize;
443 int last_rssi = ATH_RSSI_DUMMY_MARKER; 453 int last_rssi = ATH_RSSI_DUMMY_MARKER;
444 __le16 fc; 454 __le16 fc;
445 455
456 if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
457 ath_print(common, ATH_DBG_FATAL,
458 "Corrupted RX frame, dropping\n");
459 goto rx_next;
460 }
461
462 rxstatus = (struct ath_htc_rx_status *)skb->data;
463
464 if (be16_to_cpu(rxstatus->rs_datalen) -
465 (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
466 ath_print(common, ATH_DBG_FATAL,
467 "Corrupted RX data len, dropping "
468 "(dlen: %d, skblen: %d)\n",
469 rxstatus->rs_datalen, skb->len);
470 goto rx_next;
471 }
472
473 /* Get the RX status information */
474 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
475 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
476
446 hdr = (struct ieee80211_hdr *)skb->data; 477 hdr = (struct ieee80211_hdr *)skb->data;
447 fc = hdr->frame_control; 478 fc = hdr->frame_control;
448 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 479 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -530,7 +561,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
530 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; 561 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
531 } 562 }
532 563
533 rx_status->mactime = rxbuf->rxstatus.rs_tstamp; 564 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
534 rx_status->band = hw->conf.channel->band; 565 rx_status->band = hw->conf.channel->band;
535 rx_status->freq = hw->conf.channel->center_freq; 566 rx_status->freq = hw->conf.channel->center_freq;
536 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; 567 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
@@ -607,8 +638,6 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
607 struct ath_hw *ah = priv->ah; 638 struct ath_hw *ah = priv->ah;
608 struct ath_common *common = ath9k_hw_common(ah); 639 struct ath_common *common = ath9k_hw_common(ah);
609 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL; 640 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
610 struct ath_htc_rx_status *rxstatus;
611 u32 len = 0;
612 641
613 spin_lock(&priv->rx.rxbuflock); 642 spin_lock(&priv->rx.rxbuflock);
614 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) { 643 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
@@ -625,32 +654,7 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
625 goto err; 654 goto err;
626 } 655 }
627 656
628 len = skb->len;
629 if (len <= HTC_RX_FRAME_HEADER_SIZE) {
630 ath_print(common, ATH_DBG_FATAL,
631 "Corrupted RX frame, dropping\n");
632 goto err;
633 }
634
635 rxstatus = (struct ath_htc_rx_status *)skb->data;
636
637 rxstatus->rs_tstamp = be64_to_cpu(rxstatus->rs_tstamp);
638 rxstatus->rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
639 rxstatus->evm0 = be32_to_cpu(rxstatus->evm0);
640 rxstatus->evm1 = be32_to_cpu(rxstatus->evm1);
641 rxstatus->evm2 = be32_to_cpu(rxstatus->evm2);
642
643 if (rxstatus->rs_datalen - (len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
644 ath_print(common, ATH_DBG_FATAL,
645 "Corrupted RX data len, dropping "
646 "(epid: %d, dlen: %d, skblen: %d)\n",
647 ep_id, rxstatus->rs_datalen, len);
648 goto err;
649 }
650
651 spin_lock(&priv->rx.rxbuflock); 657 spin_lock(&priv->rx.rxbuflock);
652 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
653 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
654 rxbuf->skb = skb; 658 rxbuf->skb = skb;
655 rxbuf->in_process = true; 659 rxbuf->in_process = true;
656 spin_unlock(&priv->rx.rxbuflock); 660 spin_unlock(&priv->rx.rxbuflock);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 587d98ed0989..7bf6ce1e7e2e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -341,8 +341,9 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
341 skb_pull(skb, sizeof(struct htc_frame_hdr)); 341 skb_pull(skb, sizeof(struct htc_frame_hdr));
342 342
343 if (endpoint->ep_callbacks.tx) { 343 if (endpoint->ep_callbacks.tx) {
344 endpoint->ep_callbacks.tx(htc_handle->drv_priv, skb, 344 endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
345 htc_hdr->endpoint_id, txok); 345 skb, htc_hdr->endpoint_id,
346 txok);
346 } 347 }
347 } 348 }
348 349
@@ -368,7 +369,7 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
368 struct htc_frame_hdr *htc_hdr; 369 struct htc_frame_hdr *htc_hdr;
369 enum htc_endpoint_id epid; 370 enum htc_endpoint_id epid;
370 struct htc_endpoint *endpoint; 371 struct htc_endpoint *endpoint;
371 u16 *msg_id; 372 __be16 *msg_id;
372 373
373 if (!htc_handle || !skb) 374 if (!htc_handle || !skb)
374 return; 375 return;
@@ -388,14 +389,14 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
388 389
389 /* Handle trailer */ 390 /* Handle trailer */
390 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { 391 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
391 if (be32_to_cpu(*(u32 *) skb->data) == 0x00C60000) 392 if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
392 /* Move past the Watchdog pattern */ 393 /* Move past the Watchdog pattern */
393 htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); 394 htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
394 } 395 }
395 396
396 /* Get the message ID */ 397 /* Get the message ID */
397 msg_id = (u16 *) ((void *) htc_hdr + 398 msg_id = (__be16 *) ((void *) htc_hdr +
398 sizeof(struct htc_frame_hdr)); 399 sizeof(struct htc_frame_hdr));
399 400
400 /* Now process HTC messages */ 401 /* Now process HTC messages */
401 switch (be16_to_cpu(*msg_id)) { 402 switch (be16_to_cpu(*msg_id)) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index cd7048ffd239..ea50ab032d20 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -59,20 +59,20 @@ enum htc_endpoint_id {
59struct htc_frame_hdr { 59struct htc_frame_hdr {
60 u8 endpoint_id; 60 u8 endpoint_id;
61 u8 flags; 61 u8 flags;
62 u16 payload_len; 62 __be16 payload_len;
63 u8 control[4]; 63 u8 control[4];
64} __packed; 64} __packed;
65 65
66struct htc_ready_msg { 66struct htc_ready_msg {
67 u16 message_id; 67 __be16 message_id;
68 u16 credits; 68 __be16 credits;
69 u16 credit_size; 69 __be16 credit_size;
70 u8 max_endpoints; 70 u8 max_endpoints;
71 u8 pad; 71 u8 pad;
72} __packed; 72} __packed;
73 73
74struct htc_config_pipe_msg { 74struct htc_config_pipe_msg {
75 u16 message_id; 75 __be16 message_id;
76 u8 pipe_id; 76 u8 pipe_id;
77 u8 credits; 77 u8 credits;
78} __packed; 78} __packed;
@@ -192,9 +192,9 @@ enum htc_service_group_ids{
192#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8) 192#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
193 193
194struct htc_conn_svc_msg { 194struct htc_conn_svc_msg {
195 u16 msg_id; 195 __be16 msg_id;
196 u16 service_id; 196 __be16 service_id;
197 u16 con_flags; 197 __be16 con_flags;
198 u8 dl_pipeid; 198 u8 dl_pipeid;
199 u8 ul_pipeid; 199 u8 ul_pipeid;
200 u8 svc_meta_len; 200 u8 svc_meta_len;
@@ -209,17 +209,17 @@ struct htc_conn_svc_msg {
209#define HTC_SERVICE_NO_MORE_EP 4 209#define HTC_SERVICE_NO_MORE_EP 4
210 210
211struct htc_conn_svc_rspmsg { 211struct htc_conn_svc_rspmsg {
212 u16 msg_id; 212 __be16 msg_id;
213 u16 service_id; 213 __be16 service_id;
214 u8 status; 214 u8 status;
215 u8 endpoint_id; 215 u8 endpoint_id;
216 u16 max_msg_len; 216 __be16 max_msg_len;
217 u8 svc_meta_len; 217 u8 svc_meta_len;
218 u8 pad; 218 u8 pad;
219} __packed; 219} __packed;
220 220
221struct htc_comp_msg { 221struct htc_comp_msg {
222 u16 msg_id; 222 __be16 msg_id;
223} __packed; 223} __packed;
224 224
225int htc_init(struct htc_target *target); 225int htc_init(struct htc_target *target);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 000000000000..624422a8169e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH9K_HW_OPS_H
18#define ATH9K_HW_OPS_H
19
20#include "hw.h"
21
22/* Hardware core and driver accessible callbacks */
23
24static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
25 int restore,
26 int power_off)
27{
28 ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
29}
30
31static inline void ath9k_hw_rxena(struct ath_hw *ah)
32{
33 ath9k_hw_ops(ah)->rx_enable(ah);
34}
35
36static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
37 u32 link)
38{
39 ath9k_hw_ops(ah)->set_desc_link(ds, link);
40}
41
42static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
43 u32 **link)
44{
45 ath9k_hw_ops(ah)->get_desc_link(ds, link);
46}
47static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
48 struct ath9k_channel *chan,
49 u8 rxchainmask,
50 bool longcal)
51{
52 return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
53}
54
55static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
56{
57 return ath9k_hw_ops(ah)->get_isr(ah, masked);
58}
59
60static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
61 bool is_firstseg, bool is_lastseg,
62 const void *ds0, dma_addr_t buf_addr,
63 unsigned int qcu)
64{
65 ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
66 ds0, buf_addr, qcu);
67}
68
69static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
70 struct ath_tx_status *ts)
71{
72 return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
73}
74
75static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
76 u32 pktLen, enum ath9k_pkt_type type,
77 u32 txPower, u32 keyIx,
78 enum ath9k_key_type keyType,
79 u32 flags)
80{
81 ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
82 keyType, flags);
83}
84
85static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
86 void *lastds,
87 u32 durUpdateEn, u32 rtsctsRate,
88 u32 rtsctsDuration,
89 struct ath9k_11n_rate_series series[],
90 u32 nseries, u32 flags)
91{
92 ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
93 rtsctsRate, rtsctsDuration, series,
94 nseries, flags);
95}
96
97static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
98 u32 aggrLen)
99{
100 ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
101}
102
103static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
104 u32 numDelims)
105{
106 ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
107}
108
109static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
110{
111 ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
112}
113
114static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
115{
116 ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
117}
118
119static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
120 u32 burstDuration)
121{
122 ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
123}
124
125static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
126 u32 vmf)
127{
128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
129}
130
131/* Private hardware call ops */
132
133/* PHY ops */
134
135static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
136 struct ath9k_channel *chan)
137{
138 return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
139}
140
141static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
142 struct ath9k_channel *chan)
143{
144 ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
145}
146
147static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
148{
149 if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
150 return 0;
151
152 return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
153}
154
155static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
156{
157 if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
158 return;
159
160 ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
161}
162
163static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
164 struct ath9k_channel *chan,
165 u16 modesIndex)
166{
167 if (!ath9k_hw_private_ops(ah)->set_rf_regs)
168 return true;
169
170 return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
171}
172
173static inline void ath9k_hw_init_bb(struct ath_hw *ah,
174 struct ath9k_channel *chan)
175{
176 return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
177}
178
179static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
180 struct ath9k_channel *chan)
181{
182 return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
183}
184
185static inline int ath9k_hw_process_ini(struct ath_hw *ah,
186 struct ath9k_channel *chan)
187{
188 return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
189}
190
191static inline void ath9k_olc_init(struct ath_hw *ah)
192{
193 if (!ath9k_hw_private_ops(ah)->olc_init)
194 return;
195
196 return ath9k_hw_private_ops(ah)->olc_init(ah);
197}
198
199static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
200 struct ath9k_channel *chan)
201{
202 return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
203}
204
205static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
206{
207 return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
208}
209
210static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
211 struct ath9k_channel *chan)
212{
213 return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
214}
215
216static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
217{
218 return ath9k_hw_private_ops(ah)->rfbus_req(ah);
219}
220
221static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
222{
223 return ath9k_hw_private_ops(ah)->rfbus_done(ah);
224}
225
226static inline void ath9k_enable_rfkill(struct ath_hw *ah)
227{
228 return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
229}
230
231static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
232{
233 if (!ath9k_hw_private_ops(ah)->restore_chainmask)
234 return;
235
236 return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
237}
238
239static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
240{
241 return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
242}
243
244static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
245 enum ath9k_ani_cmd cmd, int param)
246{
247 return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
248}
249
250static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
251 int16_t nfarray[NUM_NF_READINGS])
252{
253 ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
254}
255
256static inline void ath9k_hw_loadnf(struct ath_hw *ah,
257 struct ath9k_channel *chan)
258{
259 ath9k_hw_private_ops(ah)->loadnf(ah, chan);
260}
261
262static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
263 struct ath9k_channel *chan)
264{
265 return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
266}
267
268static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
269 struct ath9k_cal_list *currCal)
270{
271 ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
272}
273
274static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
275 enum ath9k_cal_types calType)
276{
277 return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
278}
279
280#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index af730c7d50e6..559019262d30 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -19,15 +19,16 @@
19#include <asm/unaligned.h> 19#include <asm/unaligned.h>
20 20
21#include "hw.h" 21#include "hw.h"
22#include "hw-ops.h"
22#include "rc.h" 23#include "rc.h"
23#include "initvals.h" 24#include "ar9003_mac.h"
24 25
25#define ATH9K_CLOCK_RATE_CCK 22 26#define ATH9K_CLOCK_RATE_CCK 22
26#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 27#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
27#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 28#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
29#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
28 30
29static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
30static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
31 32
32MODULE_AUTHOR("Atheros Communications"); 33MODULE_AUTHOR("Atheros Communications");
33MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 34MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -46,6 +47,39 @@ static void __exit ath9k_exit(void)
46} 47}
47module_exit(ath9k_exit); 48module_exit(ath9k_exit);
48 49
50/* Private hardware callbacks */
51
52static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
53{
54 ath9k_hw_private_ops(ah)->init_cal_settings(ah);
55}
56
57static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
58{
59 ath9k_hw_private_ops(ah)->init_mode_regs(ah);
60}
61
62static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
63{
64 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
65
66 return priv_ops->macversion_supported(ah->hw_version.macVersion);
67}
68
69static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
70 struct ath9k_channel *chan)
71{
72 return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
73}
74
75static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
76{
77 if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
78 return;
79
80 ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
81}
82
49/********************/ 83/********************/
50/* Helper Functions */ 84/* Helper Functions */
51/********************/ 85/********************/
@@ -58,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
58 return usecs *ATH9K_CLOCK_RATE_CCK; 92 return usecs *ATH9K_CLOCK_RATE_CCK;
59 if (conf->channel->band == IEEE80211_BAND_2GHZ) 93 if (conf->channel->band == IEEE80211_BAND_2GHZ)
60 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM; 94 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
61 return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM; 95
96 if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
97 return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
98 else
99 return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
62} 100}
63 101
64static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 102static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
@@ -233,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
233 } 271 }
234} 272}
235 273
236static int ath9k_hw_get_radiorev(struct ath_hw *ah)
237{
238 u32 val;
239 int i;
240
241 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
242
243 for (i = 0; i < 8; i++)
244 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
245 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
246 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
247
248 return ath9k_hw_reverse_bits(val, 8);
249}
250
251/************************************/ 274/************************************/
252/* HW Attach, Detach, Init Routines */ 275/* HW Attach, Detach, Init Routines */
253/************************************/ 276/************************************/
@@ -257,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
257 if (AR_SREV_9100(ah)) 280 if (AR_SREV_9100(ah))
258 return; 281 return;
259 282
283 ENABLE_REGWRITE_BUFFER(ah);
284
260 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 285 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
261 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 286 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
262 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); 287 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -268,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
268 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); 293 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
269 294
270 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 295 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
296
297 REGWRITE_BUFFER_FLUSH(ah);
298 DISABLE_REGWRITE_BUFFER(ah);
271} 299}
272 300
301/* This should work for all families including legacy */
273static bool ath9k_hw_chip_test(struct ath_hw *ah) 302static bool ath9k_hw_chip_test(struct ath_hw *ah)
274{ 303{
275 struct ath_common *common = ath9k_hw_common(ah); 304 struct ath_common *common = ath9k_hw_common(ah);
276 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 305 u32 regAddr[2] = { AR_STA_ID0 };
277 u32 regHold[2]; 306 u32 regHold[2];
278 u32 patternData[4] = { 0x55555555, 307 u32 patternData[4] = { 0x55555555,
279 0xaaaaaaaa, 308 0xaaaaaaaa,
280 0x66666666, 309 0x66666666,
281 0x99999999 }; 310 0x99999999 };
282 int i, j; 311 int i, j, loop_max;
283 312
284 for (i = 0; i < 2; i++) { 313 if (!AR_SREV_9300_20_OR_LATER(ah)) {
314 loop_max = 2;
315 regAddr[1] = AR_PHY_BASE + (8 << 2);
316 } else
317 loop_max = 1;
318
319 for (i = 0; i < loop_max; i++) {
285 u32 addr = regAddr[i]; 320 u32 addr = regAddr[i];
286 u32 wrData, rdData; 321 u32 wrData, rdData;
287 322
@@ -336,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
336 ah->config.ofdm_trig_high = 500; 371 ah->config.ofdm_trig_high = 500;
337 ah->config.cck_trig_high = 200; 372 ah->config.cck_trig_high = 200;
338 ah->config.cck_trig_low = 100; 373 ah->config.cck_trig_low = 100;
339 ah->config.enable_ani = 1; 374
375 /*
376 * For now ANI is disabled for AR9003, it is still
377 * being tested.
378 */
379 if (!AR_SREV_9300_20_OR_LATER(ah))
380 ah->config.enable_ani = 1;
340 381
341 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 382 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
342 ah->config.spurchans[i][0] = AR_NO_SPUR; 383 ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -351,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
351 ah->config.rx_intr_mitigation = true; 392 ah->config.rx_intr_mitigation = true;
352 393
353 /* 394 /*
395 * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
396 * used by AR9003, but it is showing reliability issues.
397 * It will take a while to fix so this is currently disabled.
398 */
399
400 /*
354 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 401 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
355 * _and_ if on non-uniprocessor systems (Multiprocessor/HT). 402 * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
356 * This means we use it for all AR5416 devices, and the few 403 * This means we use it for all AR5416 devices, and the few
@@ -369,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
369 if (num_possible_cpus() > 1) 416 if (num_possible_cpus() > 1)
370 ah->config.serialize_regmode = SER_REG_MODE_AUTO; 417 ah->config.serialize_regmode = SER_REG_MODE_AUTO;
371} 418}
372EXPORT_SYMBOL(ath9k_hw_init);
373 419
374static void ath9k_hw_init_defaults(struct ath_hw *ah) 420static void ath9k_hw_init_defaults(struct ath_hw *ah)
375{ 421{
@@ -383,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
383 ah->hw_version.subvendorid = 0; 429 ah->hw_version.subvendorid = 0;
384 430
385 ah->ah_flags = 0; 431 ah->ah_flags = 0;
386 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
387 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
388 if (!AR_SREV_9100(ah)) 432 if (!AR_SREV_9100(ah))
389 ah->ah_flags = AH_USE_EEPROM; 433 ah->ah_flags = AH_USE_EEPROM;
390 434
@@ -397,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
397 ah->power_mode = ATH9K_PM_UNDEFINED; 441 ah->power_mode = ATH9K_PM_UNDEFINED;
398} 442}
399 443
400static int ath9k_hw_rf_claim(struct ath_hw *ah)
401{
402 u32 val;
403
404 REG_WRITE(ah, AR_PHY(0), 0x00000007);
405
406 val = ath9k_hw_get_radiorev(ah);
407 switch (val & AR_RADIO_SREV_MAJOR) {
408 case 0:
409 val = AR_RAD5133_SREV_MAJOR;
410 break;
411 case AR_RAD5133_SREV_MAJOR:
412 case AR_RAD5122_SREV_MAJOR:
413 case AR_RAD2133_SREV_MAJOR:
414 case AR_RAD2122_SREV_MAJOR:
415 break;
416 default:
417 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
418 "Radio Chip Rev 0x%02X not supported\n",
419 val & AR_RADIO_SREV_MAJOR);
420 return -EOPNOTSUPP;
421 }
422
423 ah->hw_version.analog5GhzRev = val;
424
425 return 0;
426}
427
428static int ath9k_hw_init_macaddr(struct ath_hw *ah) 444static int ath9k_hw_init_macaddr(struct ath_hw *ah)
429{ 445{
430 struct ath_common *common = ath9k_hw_common(ah); 446 struct ath_common *common = ath9k_hw_common(ah);
431 u32 sum; 447 u32 sum;
432 int i; 448 int i;
433 u16 eeval; 449 u16 eeval;
450 u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
434 451
435 sum = 0; 452 sum = 0;
436 for (i = 0; i < 3; i++) { 453 for (i = 0; i < 3; i++) {
437 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); 454 eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
438 sum += eeval; 455 sum += eeval;
439 common->macaddr[2 * i] = eeval >> 8; 456 common->macaddr[2 * i] = eeval >> 8;
440 common->macaddr[2 * i + 1] = eeval & 0xff; 457 common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -445,54 +462,6 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
445 return 0; 462 return 0;
446} 463}
447 464
448static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
449{
450 u32 rxgain_type;
451
452 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
453 rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
454
455 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
456 INIT_INI_ARRAY(&ah->iniModesRxGain,
457 ar9280Modes_backoff_13db_rxgain_9280_2,
458 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
459 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
460 INIT_INI_ARRAY(&ah->iniModesRxGain,
461 ar9280Modes_backoff_23db_rxgain_9280_2,
462 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
463 else
464 INIT_INI_ARRAY(&ah->iniModesRxGain,
465 ar9280Modes_original_rxgain_9280_2,
466 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
467 } else {
468 INIT_INI_ARRAY(&ah->iniModesRxGain,
469 ar9280Modes_original_rxgain_9280_2,
470 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
471 }
472}
473
474static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
475{
476 u32 txgain_type;
477
478 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
479 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
480
481 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
482 INIT_INI_ARRAY(&ah->iniModesTxGain,
483 ar9280Modes_high_power_tx_gain_9280_2,
484 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
485 else
486 INIT_INI_ARRAY(&ah->iniModesTxGain,
487 ar9280Modes_original_tx_gain_9280_2,
488 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
489 } else {
490 INIT_INI_ARRAY(&ah->iniModesTxGain,
491 ar9280Modes_original_tx_gain_9280_2,
492 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
493 }
494}
495
496static int ath9k_hw_post_init(struct ath_hw *ah) 465static int ath9k_hw_post_init(struct ath_hw *ah)
497{ 466{
498 int ecode; 467 int ecode;
@@ -502,9 +471,11 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
502 return -ENODEV; 471 return -ENODEV;
503 } 472 }
504 473
505 ecode = ath9k_hw_rf_claim(ah); 474 if (!AR_SREV_9300_20_OR_LATER(ah)) {
506 if (ecode != 0) 475 ecode = ar9002_hw_rf_claim(ah);
507 return ecode; 476 if (ecode != 0)
477 return ecode;
478 }
508 479
509 ecode = ath9k_hw_eeprom_init(ah); 480 ecode = ath9k_hw_eeprom_init(ah);
510 if (ecode != 0) 481 if (ecode != 0)
@@ -515,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
515 ah->eep_ops->get_eeprom_ver(ah), 486 ah->eep_ops->get_eeprom_ver(ah),
516 ah->eep_ops->get_eeprom_rev(ah)); 487 ah->eep_ops->get_eeprom_rev(ah));
517 488
518 if (!AR_SREV_9280_10_OR_LATER(ah)) { 489 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
519 ecode = ath9k_hw_rf_alloc_ext_banks(ah); 490 if (ecode) {
520 if (ecode) { 491 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
521 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 492 "Failed allocating banks for "
522 "Failed allocating banks for " 493 "external radio\n");
523 "external radio\n"); 494 return ecode;
524 return ecode;
525 }
526 } 495 }
527 496
528 if (!AR_SREV_9100(ah)) { 497 if (!AR_SREV_9100(ah)) {
@@ -533,344 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
533 return 0; 502 return 0;
534} 503}
535 504
536static bool ath9k_hw_devid_supported(u16 devid) 505static void ath9k_hw_attach_ops(struct ath_hw *ah)
537{
538 switch (devid) {
539 case AR5416_DEVID_PCI:
540 case AR5416_DEVID_PCIE:
541 case AR5416_AR9100_DEVID:
542 case AR9160_DEVID_PCI:
543 case AR9280_DEVID_PCI:
544 case AR9280_DEVID_PCIE:
545 case AR9285_DEVID_PCIE:
546 case AR5416_DEVID_AR9287_PCI:
547 case AR5416_DEVID_AR9287_PCIE:
548 case AR2427_DEVID_PCIE:
549 return true;
550 default:
551 break;
552 }
553 return false;
554}
555
556static bool ath9k_hw_macversion_supported(u32 macversion)
557{ 506{
558 switch (macversion) { 507 if (AR_SREV_9300_20_OR_LATER(ah))
559 case AR_SREV_VERSION_5416_PCI: 508 ar9003_hw_attach_ops(ah);
560 case AR_SREV_VERSION_5416_PCIE: 509 else
561 case AR_SREV_VERSION_9160: 510 ar9002_hw_attach_ops(ah);
562 case AR_SREV_VERSION_9100:
563 case AR_SREV_VERSION_9280:
564 case AR_SREV_VERSION_9285:
565 case AR_SREV_VERSION_9287:
566 case AR_SREV_VERSION_9271:
567 return true;
568 default:
569 break;
570 }
571 return false;
572}
573
574static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
575{
576 if (AR_SREV_9160_10_OR_LATER(ah)) {
577 if (AR_SREV_9280_10_OR_LATER(ah)) {
578 ah->iq_caldata.calData = &iq_cal_single_sample;
579 ah->adcgain_caldata.calData =
580 &adc_gain_cal_single_sample;
581 ah->adcdc_caldata.calData =
582 &adc_dc_cal_single_sample;
583 ah->adcdc_calinitdata.calData =
584 &adc_init_dc_cal;
585 } else {
586 ah->iq_caldata.calData = &iq_cal_multi_sample;
587 ah->adcgain_caldata.calData =
588 &adc_gain_cal_multi_sample;
589 ah->adcdc_caldata.calData =
590 &adc_dc_cal_multi_sample;
591 ah->adcdc_calinitdata.calData =
592 &adc_init_dc_cal;
593 }
594 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
595 }
596}
597
598static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
599{
600 if (AR_SREV_9271(ah)) {
601 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
602 ARRAY_SIZE(ar9271Modes_9271), 6);
603 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
604 ARRAY_SIZE(ar9271Common_9271), 2);
605 INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
606 ar9271Common_normal_cck_fir_coeff_9271,
607 ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
608 INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
609 ar9271Common_japan_2484_cck_fir_coeff_9271,
610 ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
611 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
612 ar9271Modes_9271_1_0_only,
613 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
614 INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
615 ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
616 INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
617 ar9271Modes_high_power_tx_gain_9271,
618 ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
619 INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
620 ar9271Modes_normal_power_tx_gain_9271,
621 ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
622 return;
623 }
624
625 if (AR_SREV_9287_11_OR_LATER(ah)) {
626 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
627 ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
628 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
629 ARRAY_SIZE(ar9287Common_9287_1_1), 2);
630 if (ah->config.pcie_clock_req)
631 INIT_INI_ARRAY(&ah->iniPcieSerdes,
632 ar9287PciePhy_clkreq_off_L1_9287_1_1,
633 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
634 else
635 INIT_INI_ARRAY(&ah->iniPcieSerdes,
636 ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
637 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
638 2);
639 } else if (AR_SREV_9287_10_OR_LATER(ah)) {
640 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
641 ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
642 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
643 ARRAY_SIZE(ar9287Common_9287_1_0), 2);
644
645 if (ah->config.pcie_clock_req)
646 INIT_INI_ARRAY(&ah->iniPcieSerdes,
647 ar9287PciePhy_clkreq_off_L1_9287_1_0,
648 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
649 else
650 INIT_INI_ARRAY(&ah->iniPcieSerdes,
651 ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
652 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
653 2);
654 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
655
656
657 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
658 ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
659 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
660 ARRAY_SIZE(ar9285Common_9285_1_2), 2);
661
662 if (ah->config.pcie_clock_req) {
663 INIT_INI_ARRAY(&ah->iniPcieSerdes,
664 ar9285PciePhy_clkreq_off_L1_9285_1_2,
665 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
666 } else {
667 INIT_INI_ARRAY(&ah->iniPcieSerdes,
668 ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
669 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
670 2);
671 }
672 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
673 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
674 ARRAY_SIZE(ar9285Modes_9285), 6);
675 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
676 ARRAY_SIZE(ar9285Common_9285), 2);
677
678 if (ah->config.pcie_clock_req) {
679 INIT_INI_ARRAY(&ah->iniPcieSerdes,
680 ar9285PciePhy_clkreq_off_L1_9285,
681 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
682 } else {
683 INIT_INI_ARRAY(&ah->iniPcieSerdes,
684 ar9285PciePhy_clkreq_always_on_L1_9285,
685 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
686 }
687 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
688 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
689 ARRAY_SIZE(ar9280Modes_9280_2), 6);
690 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
691 ARRAY_SIZE(ar9280Common_9280_2), 2);
692
693 if (ah->config.pcie_clock_req) {
694 INIT_INI_ARRAY(&ah->iniPcieSerdes,
695 ar9280PciePhy_clkreq_off_L1_9280,
696 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
697 } else {
698 INIT_INI_ARRAY(&ah->iniPcieSerdes,
699 ar9280PciePhy_clkreq_always_on_L1_9280,
700 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
701 }
702 INIT_INI_ARRAY(&ah->iniModesAdditional,
703 ar9280Modes_fast_clock_9280_2,
704 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
705 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
706 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
707 ARRAY_SIZE(ar9280Modes_9280), 6);
708 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
709 ARRAY_SIZE(ar9280Common_9280), 2);
710 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
711 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
712 ARRAY_SIZE(ar5416Modes_9160), 6);
713 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
714 ARRAY_SIZE(ar5416Common_9160), 2);
715 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
716 ARRAY_SIZE(ar5416Bank0_9160), 2);
717 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
718 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
719 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
720 ARRAY_SIZE(ar5416Bank1_9160), 2);
721 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
722 ARRAY_SIZE(ar5416Bank2_9160), 2);
723 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
724 ARRAY_SIZE(ar5416Bank3_9160), 3);
725 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
726 ARRAY_SIZE(ar5416Bank6_9160), 3);
727 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
728 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
729 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
730 ARRAY_SIZE(ar5416Bank7_9160), 2);
731 if (AR_SREV_9160_11(ah)) {
732 INIT_INI_ARRAY(&ah->iniAddac,
733 ar5416Addac_91601_1,
734 ARRAY_SIZE(ar5416Addac_91601_1), 2);
735 } else {
736 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
737 ARRAY_SIZE(ar5416Addac_9160), 2);
738 }
739 } else if (AR_SREV_9100_OR_LATER(ah)) {
740 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
741 ARRAY_SIZE(ar5416Modes_9100), 6);
742 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
743 ARRAY_SIZE(ar5416Common_9100), 2);
744 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
745 ARRAY_SIZE(ar5416Bank0_9100), 2);
746 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
747 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
748 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
749 ARRAY_SIZE(ar5416Bank1_9100), 2);
750 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
751 ARRAY_SIZE(ar5416Bank2_9100), 2);
752 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
753 ARRAY_SIZE(ar5416Bank3_9100), 3);
754 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
755 ARRAY_SIZE(ar5416Bank6_9100), 3);
756 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
757 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
758 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
759 ARRAY_SIZE(ar5416Bank7_9100), 2);
760 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
761 ARRAY_SIZE(ar5416Addac_9100), 2);
762 } else {
763 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
764 ARRAY_SIZE(ar5416Modes), 6);
765 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
766 ARRAY_SIZE(ar5416Common), 2);
767 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
768 ARRAY_SIZE(ar5416Bank0), 2);
769 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
770 ARRAY_SIZE(ar5416BB_RfGain), 3);
771 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
772 ARRAY_SIZE(ar5416Bank1), 2);
773 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
774 ARRAY_SIZE(ar5416Bank2), 2);
775 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
776 ARRAY_SIZE(ar5416Bank3), 3);
777 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
778 ARRAY_SIZE(ar5416Bank6), 3);
779 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
780 ARRAY_SIZE(ar5416Bank6TPC), 3);
781 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
782 ARRAY_SIZE(ar5416Bank7), 2);
783 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
784 ARRAY_SIZE(ar5416Addac), 2);
785 }
786}
787
788static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
789{
790 if (AR_SREV_9287_11_OR_LATER(ah))
791 INIT_INI_ARRAY(&ah->iniModesRxGain,
792 ar9287Modes_rx_gain_9287_1_1,
793 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
794 else if (AR_SREV_9287_10(ah))
795 INIT_INI_ARRAY(&ah->iniModesRxGain,
796 ar9287Modes_rx_gain_9287_1_0,
797 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
798 else if (AR_SREV_9280_20(ah))
799 ath9k_hw_init_rxgain_ini(ah);
800
801 if (AR_SREV_9287_11_OR_LATER(ah)) {
802 INIT_INI_ARRAY(&ah->iniModesTxGain,
803 ar9287Modes_tx_gain_9287_1_1,
804 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
805 } else if (AR_SREV_9287_10(ah)) {
806 INIT_INI_ARRAY(&ah->iniModesTxGain,
807 ar9287Modes_tx_gain_9287_1_0,
808 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
809 } else if (AR_SREV_9280_20(ah)) {
810 ath9k_hw_init_txgain_ini(ah);
811 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
812 u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
813
814 /* txgain table */
815 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
816 if (AR_SREV_9285E_20(ah)) {
817 INIT_INI_ARRAY(&ah->iniModesTxGain,
818 ar9285Modes_XE2_0_high_power,
819 ARRAY_SIZE(
820 ar9285Modes_XE2_0_high_power), 6);
821 } else {
822 INIT_INI_ARRAY(&ah->iniModesTxGain,
823 ar9285Modes_high_power_tx_gain_9285_1_2,
824 ARRAY_SIZE(
825 ar9285Modes_high_power_tx_gain_9285_1_2), 6);
826 }
827 } else {
828 if (AR_SREV_9285E_20(ah)) {
829 INIT_INI_ARRAY(&ah->iniModesTxGain,
830 ar9285Modes_XE2_0_normal_power,
831 ARRAY_SIZE(
832 ar9285Modes_XE2_0_normal_power), 6);
833 } else {
834 INIT_INI_ARRAY(&ah->iniModesTxGain,
835 ar9285Modes_original_tx_gain_9285_1_2,
836 ARRAY_SIZE(
837 ar9285Modes_original_tx_gain_9285_1_2), 6);
838 }
839 }
840 }
841}
842
843static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
844{
845 struct base_eep_header *pBase = &(ah->eeprom.def.baseEepHeader);
846 struct ath_common *common = ath9k_hw_common(ah);
847
848 ah->need_an_top2_fixup = (ah->hw_version.devid == AR9280_DEVID_PCI) &&
849 (ah->eep_map != EEP_MAP_4KBITS) &&
850 ((pBase->version & 0xff) > 0x0a) &&
851 (pBase->pwdclkind == 0);
852
853 if (ah->need_an_top2_fixup)
854 ath_print(common, ATH_DBG_EEPROM,
855 "needs fixup for AR_AN_TOP2 register\n");
856} 511}
857 512
858int ath9k_hw_init(struct ath_hw *ah) 513/* Called for all hardware families */
514static int __ath9k_hw_init(struct ath_hw *ah)
859{ 515{
860 struct ath_common *common = ath9k_hw_common(ah); 516 struct ath_common *common = ath9k_hw_common(ah);
861 int r = 0; 517 int r = 0;
862 518
863 if (common->bus_ops->ath_bus_type != ATH_USB) { 519 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
864 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) { 520 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
865 ath_print(common, ATH_DBG_FATAL,
866 "Unsupported device ID: 0x%0x\n",
867 ah->hw_version.devid);
868 return -EOPNOTSUPP;
869 }
870 }
871
872 ath9k_hw_init_defaults(ah);
873 ath9k_hw_init_config(ah);
874 521
875 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 522 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
876 ath_print(common, ATH_DBG_FATAL, 523 ath_print(common, ATH_DBG_FATAL,
@@ -878,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
878 return -EIO; 525 return -EIO;
879 } 526 }
880 527
528 ath9k_hw_init_defaults(ah);
529 ath9k_hw_init_config(ah);
530
531 ath9k_hw_attach_ops(ah);
532
881 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 533 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
882 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); 534 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
883 return -EIO; 535 return -EIO;
@@ -902,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
902 else 554 else
903 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; 555 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
904 556
905 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { 557 if (!ath9k_hw_macversion_supported(ah)) {
906 ath_print(common, ATH_DBG_FATAL, 558 ath_print(common, ATH_DBG_FATAL,
907 "Mac Chip Rev 0x%02x.%x is not supported by " 559 "Mac Chip Rev 0x%02x.%x is not supported by "
908 "this driver\n", ah->hw_version.macVersion, 560 "this driver\n", ah->hw_version.macVersion,
@@ -910,28 +562,15 @@ int ath9k_hw_init(struct ath_hw *ah)
910 return -EOPNOTSUPP; 562 return -EOPNOTSUPP;
911 } 563 }
912 564
913 if (AR_SREV_9100(ah)) { 565 if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
914 ah->iq_caldata.calData = &iq_cal_multi_sample;
915 ah->supp_cals = IQ_MISMATCH_CAL;
916 ah->is_pciexpress = false;
917 }
918
919 if (AR_SREV_9271(ah))
920 ah->is_pciexpress = false; 566 ah->is_pciexpress = false;
921 567
922 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 568 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
923
924 ath9k_hw_init_cal_settings(ah); 569 ath9k_hw_init_cal_settings(ah);
925 570
926 ah->ani_function = ATH9K_ANI_ALL; 571 ah->ani_function = ATH9K_ANI_ALL;
927 if (AR_SREV_9280_10_OR_LATER(ah)) { 572 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
928 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 573 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
929 ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
930 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
931 } else {
932 ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
933 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
934 }
935 574
936 ath9k_hw_init_mode_regs(ah); 575 ath9k_hw_init_mode_regs(ah);
937 576
@@ -940,15 +579,8 @@ int ath9k_hw_init(struct ath_hw *ah)
940 else 579 else
941 ath9k_hw_disablepcie(ah); 580 ath9k_hw_disablepcie(ah);
942 581
943 /* Support for Japan ch.14 (2484) spread */ 582 if (!AR_SREV_9300_20_OR_LATER(ah))
944 if (AR_SREV_9287_11_OR_LATER(ah)) { 583 ar9002_hw_cck_chan14_spread(ah);
945 INIT_INI_ARRAY(&ah->iniCckfirNormal,
946 ar9287Common_normal_cck_fir_coeff_92871_1,
947 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
948 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
949 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
950 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
951 }
952 584
953 r = ath9k_hw_post_init(ah); 585 r = ath9k_hw_post_init(ah);
954 if (r) 586 if (r)
@@ -959,8 +591,6 @@ int ath9k_hw_init(struct ath_hw *ah)
959 if (r) 591 if (r)
960 return r; 592 return r;
961 593
962 ath9k_hw_init_eeprom_fix(ah);
963
964 r = ath9k_hw_init_macaddr(ah); 594 r = ath9k_hw_init_macaddr(ah);
965 if (r) { 595 if (r) {
966 ath_print(common, ATH_DBG_FATAL, 596 ath_print(common, ATH_DBG_FATAL,
@@ -973,6 +603,9 @@ int ath9k_hw_init(struct ath_hw *ah)
973 else 603 else
974 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 604 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
975 605
606 if (AR_SREV_9300_20_OR_LATER(ah))
607 ar9003_hw_set_nf_limits(ah);
608
976 ath9k_init_nfcal_hist_buffer(ah); 609 ath9k_init_nfcal_hist_buffer(ah);
977 610
978 common->state = ATH_HW_INITIALIZED; 611 common->state = ATH_HW_INITIALIZED;
@@ -980,24 +613,50 @@ int ath9k_hw_init(struct ath_hw *ah)
980 return 0; 613 return 0;
981} 614}
982 615
983static void ath9k_hw_init_bb(struct ath_hw *ah, 616int ath9k_hw_init(struct ath_hw *ah)
984 struct ath9k_channel *chan)
985{ 617{
986 u32 synthDelay; 618 int ret;
619 struct ath_common *common = ath9k_hw_common(ah);
987 620
988 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 621 /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
989 if (IS_CHAN_B(chan)) 622 switch (ah->hw_version.devid) {
990 synthDelay = (4 * synthDelay) / 22; 623 case AR5416_DEVID_PCI:
991 else 624 case AR5416_DEVID_PCIE:
992 synthDelay /= 10; 625 case AR5416_AR9100_DEVID:
626 case AR9160_DEVID_PCI:
627 case AR9280_DEVID_PCI:
628 case AR9280_DEVID_PCIE:
629 case AR9285_DEVID_PCIE:
630 case AR9287_DEVID_PCI:
631 case AR9287_DEVID_PCIE:
632 case AR2427_DEVID_PCIE:
633 case AR9300_DEVID_PCIE:
634 break;
635 default:
636 if (common->bus_ops->ath_bus_type == ATH_USB)
637 break;
638 ath_print(common, ATH_DBG_FATAL,
639 "Hardware device ID 0x%04x not supported\n",
640 ah->hw_version.devid);
641 return -EOPNOTSUPP;
642 }
993 643
994 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 644 ret = __ath9k_hw_init(ah);
645 if (ret) {
646 ath_print(common, ATH_DBG_FATAL,
647 "Unable to initialize hardware; "
648 "initialization status: %d\n", ret);
649 return ret;
650 }
995 651
996 udelay(synthDelay + BASE_ACTIVATE_DELAY); 652 return 0;
997} 653}
654EXPORT_SYMBOL(ath9k_hw_init);
998 655
999static void ath9k_hw_init_qos(struct ath_hw *ah) 656static void ath9k_hw_init_qos(struct ath_hw *ah)
1000{ 657{
658 ENABLE_REGWRITE_BUFFER(ah);
659
1001 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 660 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
1002 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 661 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
1003 662
@@ -1011,69 +670,16 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
1011 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); 670 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
1012 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); 671 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
1013 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 672 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
673
674 REGWRITE_BUFFER_FLUSH(ah);
675 DISABLE_REGWRITE_BUFFER(ah);
1014} 676}
1015 677
1016static void ath9k_hw_init_pll(struct ath_hw *ah, 678static void ath9k_hw_init_pll(struct ath_hw *ah,
1017 struct ath9k_channel *chan) 679 struct ath9k_channel *chan)
1018{ 680{
1019 u32 pll; 681 u32 pll = ath9k_hw_compute_pll_control(ah, chan);
1020
1021 if (AR_SREV_9100(ah)) {
1022 if (chan && IS_CHAN_5GHZ(chan))
1023 pll = 0x1450;
1024 else
1025 pll = 0x1458;
1026 } else {
1027 if (AR_SREV_9280_10_OR_LATER(ah)) {
1028 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1029
1030 if (chan && IS_CHAN_HALF_RATE(chan))
1031 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1032 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1033 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1034
1035 if (chan && IS_CHAN_5GHZ(chan)) {
1036 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
1037
1038 682
1039 if (AR_SREV_9280_20(ah)) {
1040 if (((chan->channel % 20) == 0)
1041 || ((chan->channel % 10) == 0))
1042 pll = 0x2850;
1043 else
1044 pll = 0x142c;
1045 }
1046 } else {
1047 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
1048 }
1049
1050 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1051
1052 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1053
1054 if (chan && IS_CHAN_HALF_RATE(chan))
1055 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1056 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1057 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1058
1059 if (chan && IS_CHAN_5GHZ(chan))
1060 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1061 else
1062 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1063 } else {
1064 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1065
1066 if (chan && IS_CHAN_HALF_RATE(chan))
1067 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1068 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1069 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1070
1071 if (chan && IS_CHAN_5GHZ(chan))
1072 pll |= SM(0xa, AR_RTC_PLL_DIV);
1073 else
1074 pll |= SM(0xb, AR_RTC_PLL_DIV);
1075 }
1076 }
1077 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 683 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1078 684
1079 /* Switch the core clock for ar9271 to 117Mhz */ 685 /* Switch the core clock for ar9271 to 117Mhz */
@@ -1087,43 +693,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
1087 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 693 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1088} 694}
1089 695
1090static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
1091{
1092 int rx_chainmask, tx_chainmask;
1093
1094 rx_chainmask = ah->rxchainmask;
1095 tx_chainmask = ah->txchainmask;
1096
1097 switch (rx_chainmask) {
1098 case 0x5:
1099 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1100 AR_PHY_SWAP_ALT_CHAIN);
1101 case 0x3:
1102 if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
1103 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
1104 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
1105 break;
1106 }
1107 case 0x1:
1108 case 0x2:
1109 case 0x7:
1110 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
1111 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
1112 break;
1113 default:
1114 break;
1115 }
1116
1117 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
1118 if (tx_chainmask == 0x5) {
1119 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1120 AR_PHY_SWAP_ALT_CHAIN);
1121 }
1122 if (AR_SREV_9100(ah))
1123 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
1124 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
1125}
1126
1127static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 696static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1128 enum nl80211_iftype opmode) 697 enum nl80211_iftype opmode)
1129{ 698{
@@ -1133,16 +702,30 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1133 AR_IMR_RXORN | 702 AR_IMR_RXORN |
1134 AR_IMR_BCNMISC; 703 AR_IMR_BCNMISC;
1135 704
1136 if (ah->config.rx_intr_mitigation) 705 if (AR_SREV_9300_20_OR_LATER(ah)) {
1137 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 706 imr_reg |= AR_IMR_RXOK_HP;
1138 else 707 if (ah->config.rx_intr_mitigation)
1139 imr_reg |= AR_IMR_RXOK; 708 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
709 else
710 imr_reg |= AR_IMR_RXOK_LP;
1140 711
1141 imr_reg |= AR_IMR_TXOK; 712 } else {
713 if (ah->config.rx_intr_mitigation)
714 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
715 else
716 imr_reg |= AR_IMR_RXOK;
717 }
718
719 if (ah->config.tx_intr_mitigation)
720 imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
721 else
722 imr_reg |= AR_IMR_TXOK;
1142 723
1143 if (opmode == NL80211_IFTYPE_AP) 724 if (opmode == NL80211_IFTYPE_AP)
1144 imr_reg |= AR_IMR_MIB; 725 imr_reg |= AR_IMR_MIB;
1145 726
727 ENABLE_REGWRITE_BUFFER(ah);
728
1146 REG_WRITE(ah, AR_IMR, imr_reg); 729 REG_WRITE(ah, AR_IMR, imr_reg);
1147 ah->imrs2_reg |= AR_IMR_S2_GTT; 730 ah->imrs2_reg |= AR_IMR_S2_GTT;
1148 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 731 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
@@ -1152,6 +735,16 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1152 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); 735 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
1153 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); 736 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
1154 } 737 }
738
739 REGWRITE_BUFFER_FLUSH(ah);
740 DISABLE_REGWRITE_BUFFER(ah);
741
742 if (AR_SREV_9300_20_OR_LATER(ah)) {
743 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
744 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
745 REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
746 REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
747 }
1155} 748}
1156 749
1157static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) 750static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1237,14 +830,10 @@ void ath9k_hw_deinit(struct ath_hw *ah)
1237 if (common->state < ATH_HW_INITIALIZED) 830 if (common->state < ATH_HW_INITIALIZED)
1238 goto free_hw; 831 goto free_hw;
1239 832
1240 if (!AR_SREV_9100(ah))
1241 ath9k_hw_ani_disable(ah);
1242
1243 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 833 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1244 834
1245free_hw: 835free_hw:
1246 if (!AR_SREV_9280_10_OR_LATER(ah)) 836 ath9k_hw_rf_free_ext_banks(ah);
1247 ath9k_hw_rf_free_ext_banks(ah);
1248} 837}
1249EXPORT_SYMBOL(ath9k_hw_deinit); 838EXPORT_SYMBOL(ath9k_hw_deinit);
1250 839
@@ -1252,73 +841,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
1252/* INI */ 841/* INI */
1253/*******/ 842/*******/
1254 843
1255static void ath9k_hw_override_ini(struct ath_hw *ah, 844u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
1256 struct ath9k_channel *chan)
1257{
1258 u32 val;
1259
1260 /*
1261 * Set the RX_ABORT and RX_DIS and clear if off only after
1262 * RXE is set for MAC. This prevents frames with corrupted
1263 * descriptor status.
1264 */
1265 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
1266
1267 if (AR_SREV_9280_10_OR_LATER(ah)) {
1268 val = REG_READ(ah, AR_PCU_MISC_MODE2);
1269
1270 if (!AR_SREV_9271(ah))
1271 val &= ~AR_PCU_MISC_MODE2_HWWAR1;
1272
1273 if (AR_SREV_9287_10_OR_LATER(ah))
1274 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
1275
1276 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
1277 }
1278
1279 if (!AR_SREV_5416_20_OR_LATER(ah) ||
1280 AR_SREV_9280_10_OR_LATER(ah))
1281 return;
1282 /*
1283 * Disable BB clock gating
1284 * Necessary to avoid issues on AR5416 2.0
1285 */
1286 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
1287
1288 /*
1289 * Disable RIFS search on some chips to avoid baseband
1290 * hang issues.
1291 */
1292 if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
1293 val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
1294 val &= ~AR_PHY_RIFS_INIT_DELAY;
1295 REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
1296 }
1297}
1298
1299static void ath9k_olc_init(struct ath_hw *ah)
1300{
1301 u32 i;
1302
1303 if (OLC_FOR_AR9287_10_LATER) {
1304 REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
1305 AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
1306 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
1307 AR9287_AN_TXPC0_TXPCMODE,
1308 AR9287_AN_TXPC0_TXPCMODE_S,
1309 AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
1310 udelay(100);
1311 } else {
1312 for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
1313 ah->originalGain[i] =
1314 MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
1315 AR_PHY_TX_GAIN);
1316 ah->PDADCdelta = 0;
1317 }
1318}
1319
1320static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1321 struct ath9k_channel *chan)
1322{ 845{
1323 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); 846 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
1324 847
@@ -1332,193 +855,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1332 return ctl; 855 return ctl;
1333} 856}
1334 857
1335static int ath9k_hw_process_ini(struct ath_hw *ah,
1336 struct ath9k_channel *chan)
1337{
1338 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1339 int i, regWrites = 0;
1340 struct ieee80211_channel *channel = chan->chan;
1341 u32 modesIndex, freqIndex;
1342
1343 switch (chan->chanmode) {
1344 case CHANNEL_A:
1345 case CHANNEL_A_HT20:
1346 modesIndex = 1;
1347 freqIndex = 1;
1348 break;
1349 case CHANNEL_A_HT40PLUS:
1350 case CHANNEL_A_HT40MINUS:
1351 modesIndex = 2;
1352 freqIndex = 1;
1353 break;
1354 case CHANNEL_G:
1355 case CHANNEL_G_HT20:
1356 case CHANNEL_B:
1357 modesIndex = 4;
1358 freqIndex = 2;
1359 break;
1360 case CHANNEL_G_HT40PLUS:
1361 case CHANNEL_G_HT40MINUS:
1362 modesIndex = 3;
1363 freqIndex = 2;
1364 break;
1365
1366 default:
1367 return -EINVAL;
1368 }
1369
1370 /* Set correct baseband to analog shift setting to access analog chips */
1371 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1372
1373 /* Write ADDAC shifts */
1374 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
1375 ah->eep_ops->set_addac(ah, chan);
1376
1377 if (AR_SREV_5416_22_OR_LATER(ah)) {
1378 REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
1379 } else {
1380 struct ar5416IniArray temp;
1381 u32 addacSize =
1382 sizeof(u32) * ah->iniAddac.ia_rows *
1383 ah->iniAddac.ia_columns;
1384
1385 /* For AR5416 2.0/2.1 */
1386 memcpy(ah->addac5416_21,
1387 ah->iniAddac.ia_array, addacSize);
1388
1389 /* override CLKDRV value at [row, column] = [31, 1] */
1390 (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
1391
1392 temp.ia_array = ah->addac5416_21;
1393 temp.ia_columns = ah->iniAddac.ia_columns;
1394 temp.ia_rows = ah->iniAddac.ia_rows;
1395 REG_WRITE_ARRAY(&temp, 1, regWrites);
1396 }
1397
1398 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
1399
1400 for (i = 0; i < ah->iniModes.ia_rows; i++) {
1401 u32 reg = INI_RA(&ah->iniModes, i, 0);
1402 u32 val = INI_RA(&ah->iniModes, i, modesIndex);
1403
1404 if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
1405 val &= ~AR_AN_TOP2_PWDCLKIND;
1406
1407 REG_WRITE(ah, reg, val);
1408
1409 if (reg >= 0x7800 && reg < 0x78a0
1410 && ah->config.analog_shiftreg) {
1411 udelay(100);
1412 }
1413
1414 DO_DELAY(regWrites);
1415 }
1416
1417 if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
1418 REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
1419
1420 if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
1421 AR_SREV_9287_10_OR_LATER(ah))
1422 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1423
1424 if (AR_SREV_9271_10(ah))
1425 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1426 modesIndex, regWrites);
1427
1428 /* Write common array parameters */
1429 for (i = 0; i < ah->iniCommon.ia_rows; i++) {
1430 u32 reg = INI_RA(&ah->iniCommon, i, 0);
1431 u32 val = INI_RA(&ah->iniCommon, i, 1);
1432
1433 REG_WRITE(ah, reg, val);
1434
1435 if (reg >= 0x7800 && reg < 0x78a0
1436 && ah->config.analog_shiftreg) {
1437 udelay(100);
1438 }
1439
1440 DO_DELAY(regWrites);
1441 }
1442
1443 if (AR_SREV_9271(ah)) {
1444 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
1445 REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
1446 modesIndex, regWrites);
1447 else
1448 REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
1449 modesIndex, regWrites);
1450 }
1451
1452 ath9k_hw_write_regs(ah, freqIndex, regWrites);
1453
1454 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1455 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
1456 regWrites);
1457 }
1458
1459 ath9k_hw_override_ini(ah, chan);
1460 ath9k_hw_set_regs(ah, chan);
1461 ath9k_hw_init_chain_masks(ah);
1462
1463 if (OLC_FOR_AR9280_20_LATER)
1464 ath9k_olc_init(ah);
1465
1466 /* Set TX power */
1467 ah->eep_ops->set_txpower(ah, chan,
1468 ath9k_regd_get_ctl(regulatory, chan),
1469 channel->max_antenna_gain * 2,
1470 channel->max_power * 2,
1471 min((u32) MAX_RATE_POWER,
1472 (u32) regulatory->power_limit));
1473
1474 /* Write analog registers */
1475 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
1476 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1477 "ar5416SetRfRegs failed\n");
1478 return -EIO;
1479 }
1480
1481 return 0;
1482}
1483
1484/****************************************/ 858/****************************************/
1485/* Reset and Channel Switching Routines */ 859/* Reset and Channel Switching Routines */
1486/****************************************/ 860/****************************************/
1487 861
1488static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
1489{
1490 u32 rfMode = 0;
1491
1492 if (chan == NULL)
1493 return;
1494
1495 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1496 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1497
1498 if (!AR_SREV_9280_10_OR_LATER(ah))
1499 rfMode |= (IS_CHAN_5GHZ(chan)) ?
1500 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
1501
1502 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1503 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1504
1505 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1506}
1507
1508static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
1509{
1510 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1511}
1512
1513static inline void ath9k_hw_set_dma(struct ath_hw *ah) 862static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1514{ 863{
864 struct ath_common *common = ath9k_hw_common(ah);
1515 u32 regval; 865 u32 regval;
1516 866
867 ENABLE_REGWRITE_BUFFER(ah);
868
1517 /* 869 /*
1518 * set AHB_MODE not to do cacheline prefetches 870 * set AHB_MODE not to do cacheline prefetches
1519 */ 871 */
1520 regval = REG_READ(ah, AR_AHB_MODE); 872 if (!AR_SREV_9300_20_OR_LATER(ah)) {
1521 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN); 873 regval = REG_READ(ah, AR_AHB_MODE);
874 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
875 }
1522 876
1523 /* 877 /*
1524 * let mac dma reads be in 128 byte chunks 878 * let mac dma reads be in 128 byte chunks
@@ -1526,12 +880,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1526 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; 880 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
1527 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); 881 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
1528 882
883 REGWRITE_BUFFER_FLUSH(ah);
884 DISABLE_REGWRITE_BUFFER(ah);
885
1529 /* 886 /*
1530 * Restore TX Trigger Level to its pre-reset value. 887 * Restore TX Trigger Level to its pre-reset value.
1531 * The initial value depends on whether aggregation is enabled, and is 888 * The initial value depends on whether aggregation is enabled, and is
1532 * adjusted whenever underruns are detected. 889 * adjusted whenever underruns are detected.
1533 */ 890 */
1534 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); 891 if (!AR_SREV_9300_20_OR_LATER(ah))
892 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
893
894 ENABLE_REGWRITE_BUFFER(ah);
1535 895
1536 /* 896 /*
1537 * let mac dma writes be in 128 byte chunks 897 * let mac dma writes be in 128 byte chunks
@@ -1544,6 +904,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1544 */ 904 */
1545 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); 905 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
1546 906
907 if (AR_SREV_9300_20_OR_LATER(ah)) {
908 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
909 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
910
911 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
912 ah->caps.rx_status_len);
913 }
914
1547 /* 915 /*
1548 * reduce the number of usable entries in PCU TXBUF to avoid 916 * reduce the number of usable entries in PCU TXBUF to avoid
1549 * wrap around issues. 917 * wrap around issues.
@@ -1559,6 +927,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1559 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, 927 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1560 AR_PCU_TXBUF_CTRL_USABLE_SIZE); 928 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
1561 } 929 }
930
931 REGWRITE_BUFFER_FLUSH(ah);
932 DISABLE_REGWRITE_BUFFER(ah);
933
934 if (AR_SREV_9300_20_OR_LATER(ah))
935 ath9k_hw_reset_txstatus_ring(ah);
1562} 936}
1563 937
1564static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) 938static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1586,10 +960,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1586 } 960 }
1587} 961}
1588 962
1589static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, 963void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
1590 u32 coef_scaled, 964 u32 *coef_mantissa, u32 *coef_exponent)
1591 u32 *coef_mantissa,
1592 u32 *coef_exponent)
1593{ 965{
1594 u32 coef_exp, coef_man; 966 u32 coef_exp, coef_man;
1595 967
@@ -1605,40 +977,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
1605 *coef_exponent = coef_exp - 16; 977 *coef_exponent = coef_exp - 16;
1606} 978}
1607 979
1608static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
1609 struct ath9k_channel *chan)
1610{
1611 u32 coef_scaled, ds_coef_exp, ds_coef_man;
1612 u32 clockMhzScaled = 0x64000000;
1613 struct chan_centers centers;
1614
1615 if (IS_CHAN_HALF_RATE(chan))
1616 clockMhzScaled = clockMhzScaled >> 1;
1617 else if (IS_CHAN_QUARTER_RATE(chan))
1618 clockMhzScaled = clockMhzScaled >> 2;
1619
1620 ath9k_hw_get_channel_centers(ah, chan, &centers);
1621 coef_scaled = clockMhzScaled / centers.synth_center;
1622
1623 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
1624 &ds_coef_exp);
1625
1626 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
1627 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
1628 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
1629 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
1630
1631 coef_scaled = (9 * coef_scaled) / 10;
1632
1633 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
1634 &ds_coef_exp);
1635
1636 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
1637 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
1638 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
1639 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
1640}
1641
1642static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) 980static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1643{ 981{
1644 u32 rst_flags; 982 u32 rst_flags;
@@ -1652,6 +990,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1652 (void)REG_READ(ah, AR_RTC_DERIVED_CLK); 990 (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
1653 } 991 }
1654 992
993 ENABLE_REGWRITE_BUFFER(ah);
994
1655 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 995 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1656 AR_RTC_FORCE_WAKE_ON_INT); 996 AR_RTC_FORCE_WAKE_ON_INT);
1657 997
@@ -1663,11 +1003,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1663 if (tmpReg & 1003 if (tmpReg &
1664 (AR_INTR_SYNC_LOCAL_TIMEOUT | 1004 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1665 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { 1005 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1006 u32 val;
1666 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 1007 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1667 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 1008
1668 } else { 1009 val = AR_RC_HOSTIF;
1010 if (!AR_SREV_9300_20_OR_LATER(ah))
1011 val |= AR_RC_AHB;
1012 REG_WRITE(ah, AR_RC, val);
1013
1014 } else if (!AR_SREV_9300_20_OR_LATER(ah))
1669 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1015 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1670 }
1671 1016
1672 rst_flags = AR_RTC_RC_MAC_WARM; 1017 rst_flags = AR_RTC_RC_MAC_WARM;
1673 if (type == ATH9K_RESET_COLD) 1018 if (type == ATH9K_RESET_COLD)
@@ -1675,6 +1020,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1675 } 1020 }
1676 1021
1677 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1022 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1023
1024 REGWRITE_BUFFER_FLUSH(ah);
1025 DISABLE_REGWRITE_BUFFER(ah);
1026
1678 udelay(50); 1027 udelay(50);
1679 1028
1680 REG_WRITE(ah, AR_RTC_RC, 0); 1029 REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1695,16 +1044,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1695 1044
1696static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) 1045static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1697{ 1046{
1047 ENABLE_REGWRITE_BUFFER(ah);
1048
1698 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1049 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1699 AR_RTC_FORCE_WAKE_ON_INT); 1050 AR_RTC_FORCE_WAKE_ON_INT);
1700 1051
1701 if (!AR_SREV_9100(ah)) 1052 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1702 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1053 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1703 1054
1704 REG_WRITE(ah, AR_RTC_RESET, 0); 1055 REG_WRITE(ah, AR_RTC_RESET, 0);
1705 udelay(2);
1706 1056
1707 if (!AR_SREV_9100(ah)) 1057 REGWRITE_BUFFER_FLUSH(ah);
1058 DISABLE_REGWRITE_BUFFER(ah);
1059
1060 if (!AR_SREV_9300_20_OR_LATER(ah))
1061 udelay(2);
1062
1063 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1708 REG_WRITE(ah, AR_RC, 0); 1064 REG_WRITE(ah, AR_RC, 0);
1709 1065
1710 REG_WRITE(ah, AR_RTC_RESET, 1); 1066 REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1740,34 +1096,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1740 } 1096 }
1741} 1097}
1742 1098
1743static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
1744{
1745 u32 phymode;
1746 u32 enableDacFifo = 0;
1747
1748 if (AR_SREV_9285_10_OR_LATER(ah))
1749 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
1750 AR_PHY_FC_ENABLE_DAC_FIFO);
1751
1752 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1753 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
1754
1755 if (IS_CHAN_HT40(chan)) {
1756 phymode |= AR_PHY_FC_DYN2040_EN;
1757
1758 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1759 (chan->chanmode == CHANNEL_G_HT40PLUS))
1760 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1761
1762 }
1763 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1764
1765 ath9k_hw_set11nmac2040(ah);
1766
1767 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1768 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1769}
1770
1771static bool ath9k_hw_chip_reset(struct ath_hw *ah, 1099static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1772 struct ath9k_channel *chan) 1100 struct ath9k_channel *chan)
1773{ 1101{
@@ -1793,7 +1121,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1793 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1121 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1794 struct ath_common *common = ath9k_hw_common(ah); 1122 struct ath_common *common = ath9k_hw_common(ah);
1795 struct ieee80211_channel *channel = chan->chan; 1123 struct ieee80211_channel *channel = chan->chan;
1796 u32 synthDelay, qnum; 1124 u32 qnum;
1797 int r; 1125 int r;
1798 1126
1799 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1127 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1805,17 +1133,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1805 } 1133 }
1806 } 1134 }
1807 1135
1808 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); 1136 if (!ath9k_hw_rfbus_req(ah)) {
1809 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
1810 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
1811 ath_print(common, ATH_DBG_FATAL, 1137 ath_print(common, ATH_DBG_FATAL,
1812 "Could not kill baseband RX\n"); 1138 "Could not kill baseband RX\n");
1813 return false; 1139 return false;
1814 } 1140 }
1815 1141
1816 ath9k_hw_set_regs(ah, chan); 1142 ath9k_hw_set_channel_regs(ah, chan);
1817 1143
1818 r = ah->ath9k_hw_rf_set_freq(ah, chan); 1144 r = ath9k_hw_rf_set_freq(ah, chan);
1819 if (r) { 1145 if (r) {
1820 ath_print(common, ATH_DBG_FATAL, 1146 ath_print(common, ATH_DBG_FATAL,
1821 "Failed to set channel\n"); 1147 "Failed to set channel\n");
@@ -1829,20 +1155,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1829 min((u32) MAX_RATE_POWER, 1155 min((u32) MAX_RATE_POWER,
1830 (u32) regulatory->power_limit)); 1156 (u32) regulatory->power_limit));
1831 1157
1832 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 1158 ath9k_hw_rfbus_done(ah);
1833 if (IS_CHAN_B(chan))
1834 synthDelay = (4 * synthDelay) / 22;
1835 else
1836 synthDelay /= 10;
1837
1838 udelay(synthDelay + BASE_ACTIVATE_DELAY);
1839
1840 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
1841 1159
1842 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1160 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1843 ath9k_hw_set_delta_slope(ah, chan); 1161 ath9k_hw_set_delta_slope(ah, chan);
1844 1162
1845 ah->ath9k_hw_spur_mitigate_freq(ah, chan); 1163 ath9k_hw_spur_mitigate_freq(ah, chan);
1846 1164
1847 if (!chan->oneTimeCalsDone) 1165 if (!chan->oneTimeCalsDone)
1848 chan->oneTimeCalsDone = true; 1166 chan->oneTimeCalsDone = true;
@@ -1850,17 +1168,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1850 return true; 1168 return true;
1851} 1169}
1852 1170
1853static void ath9k_enable_rfkill(struct ath_hw *ah) 1171bool ath9k_hw_check_alive(struct ath_hw *ah)
1854{ 1172{
1855 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 1173 int count = 50;
1856 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); 1174 u32 reg;
1175
1176 if (AR_SREV_9285_10_OR_LATER(ah))
1177 return true;
1178
1179 do {
1180 reg = REG_READ(ah, AR_OBS_BUS_1);
1857 1181
1858 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, 1182 if ((reg & 0x7E7FFFEF) == 0x00702400)
1859 AR_GPIO_INPUT_MUX2_RFSILENT); 1183 continue;
1860 1184
1861 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); 1185 switch (reg & 0x7E000B00) {
1862 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); 1186 case 0x1E000000:
1187 case 0x52000B00:
1188 case 0x18000B00:
1189 continue;
1190 default:
1191 return true;
1192 }
1193 } while (count-- > 0);
1194
1195 return false;
1863} 1196}
1197EXPORT_SYMBOL(ath9k_hw_check_alive);
1864 1198
1865int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1199int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1866 bool bChannelChange) 1200 bool bChannelChange)
@@ -1871,11 +1205,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1871 u32 saveDefAntenna; 1205 u32 saveDefAntenna;
1872 u32 macStaId1; 1206 u32 macStaId1;
1873 u64 tsf = 0; 1207 u64 tsf = 0;
1874 int i, rx_chainmask, r; 1208 int i, r;
1875 1209
1876 ah->txchainmask = common->tx_chainmask; 1210 ah->txchainmask = common->tx_chainmask;
1877 ah->rxchainmask = common->rx_chainmask; 1211 ah->rxchainmask = common->rx_chainmask;
1878 1212
1213 if (!ah->chip_fullsleep) {
1214 ath9k_hw_abortpcurecv(ah);
1215 if (!ath9k_hw_stopdmarecv(ah))
1216 ath_print(common, ATH_DBG_XMIT,
1217 "Failed to stop receive dma\n");
1218 }
1219
1879 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1220 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1880 return -EIO; 1221 return -EIO;
1881 1222
@@ -1888,8 +1229,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1888 (chan->channel != ah->curchan->channel) && 1229 (chan->channel != ah->curchan->channel) &&
1889 ((chan->channelFlags & CHANNEL_ALL) == 1230 ((chan->channelFlags & CHANNEL_ALL) ==
1890 (ah->curchan->channelFlags & CHANNEL_ALL)) && 1231 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
1891 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || 1232 !AR_SREV_9280(ah)) {
1892 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
1893 1233
1894 if (ath9k_hw_channel_change(ah, chan)) { 1234 if (ath9k_hw_channel_change(ah, chan)) {
1895 ath9k_hw_loadnf(ah, ah->curchan); 1235 ath9k_hw_loadnf(ah, ah->curchan);
@@ -1943,16 +1283,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1943 if (AR_SREV_9280_10_OR_LATER(ah)) 1283 if (AR_SREV_9280_10_OR_LATER(ah))
1944 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 1284 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
1945 1285
1946 if (AR_SREV_9287_12_OR_LATER(ah)) {
1947 /* Enable ASYNC FIFO */
1948 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
1949 AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
1950 REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
1951 REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
1952 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
1953 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
1954 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
1955 }
1956 r = ath9k_hw_process_ini(ah, chan); 1286 r = ath9k_hw_process_ini(ah, chan);
1957 if (r) 1287 if (r)
1958 return r; 1288 return r;
@@ -1977,9 +1307,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1977 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1307 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1978 ath9k_hw_set_delta_slope(ah, chan); 1308 ath9k_hw_set_delta_slope(ah, chan);
1979 1309
1980 ah->ath9k_hw_spur_mitigate_freq(ah, chan); 1310 ath9k_hw_spur_mitigate_freq(ah, chan);
1981 ah->eep_ops->set_board_values(ah, chan); 1311 ah->eep_ops->set_board_values(ah, chan);
1982 1312
1313 ath9k_hw_set_operating_mode(ah, ah->opmode);
1314
1315 ENABLE_REGWRITE_BUFFER(ah);
1316
1983 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); 1317 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
1984 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4) 1318 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
1985 | macStaId1 1319 | macStaId1
@@ -1987,25 +1321,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1987 | (ah->config. 1321 | (ah->config.
1988 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) 1322 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1989 | ah->sta_id1_defaults); 1323 | ah->sta_id1_defaults);
1990 ath9k_hw_set_operating_mode(ah, ah->opmode);
1991
1992 ath_hw_setbssidmask(common); 1324 ath_hw_setbssidmask(common);
1993
1994 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 1325 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1995
1996 ath9k_hw_write_associd(ah); 1326 ath9k_hw_write_associd(ah);
1997
1998 REG_WRITE(ah, AR_ISR, ~0); 1327 REG_WRITE(ah, AR_ISR, ~0);
1999
2000 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 1328 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
2001 1329
2002 r = ah->ath9k_hw_rf_set_freq(ah, chan); 1330 REGWRITE_BUFFER_FLUSH(ah);
1331 DISABLE_REGWRITE_BUFFER(ah);
1332
1333 r = ath9k_hw_rf_set_freq(ah, chan);
2003 if (r) 1334 if (r)
2004 return r; 1335 return r;
2005 1336
1337 ENABLE_REGWRITE_BUFFER(ah);
1338
2006 for (i = 0; i < AR_NUM_DCU; i++) 1339 for (i = 0; i < AR_NUM_DCU; i++)
2007 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 1340 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
2008 1341
1342 REGWRITE_BUFFER_FLUSH(ah);
1343 DISABLE_REGWRITE_BUFFER(ah);
1344
2009 ah->intr_txqs = 0; 1345 ah->intr_txqs = 0;
2010 for (i = 0; i < ah->caps.total_queues; i++) 1346 for (i = 0; i < ah->caps.total_queues; i++)
2011 ath9k_hw_resettxqueue(ah, i); 1347 ath9k_hw_resettxqueue(ah, i);
@@ -2018,25 +1354,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2018 1354
2019 ath9k_hw_init_global_settings(ah); 1355 ath9k_hw_init_global_settings(ah);
2020 1356
2021 if (AR_SREV_9287_12_OR_LATER(ah)) { 1357 if (!AR_SREV_9300_20_OR_LATER(ah)) {
2022 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 1358 ar9002_hw_enable_async_fifo(ah);
2023 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); 1359 ar9002_hw_enable_wep_aggregation(ah);
2024 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
2025 AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
2026 REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
2027 AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
2028
2029 REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
2030 REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
2031
2032 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
2033 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
2034 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
2035 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
2036 }
2037 if (AR_SREV_9287_12_OR_LATER(ah)) {
2038 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
2039 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
2040 } 1360 }
2041 1361
2042 REG_WRITE(ah, AR_STA_ID1, 1362 REG_WRITE(ah, AR_STA_ID1,
@@ -2051,19 +1371,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2051 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 1371 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
2052 } 1372 }
2053 1373
1374 if (ah->config.tx_intr_mitigation) {
1375 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
1376 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
1377 }
1378
2054 ath9k_hw_init_bb(ah, chan); 1379 ath9k_hw_init_bb(ah, chan);
2055 1380
2056 if (!ath9k_hw_init_cal(ah, chan)) 1381 if (!ath9k_hw_init_cal(ah, chan))
2057 return -EIO; 1382 return -EIO;
2058 1383
2059 rx_chainmask = ah->rxchainmask; 1384 ENABLE_REGWRITE_BUFFER(ah);
2060 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
2061 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
2062 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
2063 }
2064 1385
1386 ath9k_hw_restore_chainmask(ah);
2065 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); 1387 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
2066 1388
1389 REGWRITE_BUFFER_FLUSH(ah);
1390 DISABLE_REGWRITE_BUFFER(ah);
1391
2067 /* 1392 /*
2068 * For big endian systems turn on swapping for descriptors 1393 * For big endian systems turn on swapping for descriptors
2069 */ 1394 */
@@ -2093,6 +1418,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2093 if (ah->btcoex_hw.enabled) 1418 if (ah->btcoex_hw.enabled)
2094 ath9k_hw_btcoex_enable(ah); 1419 ath9k_hw_btcoex_enable(ah);
2095 1420
1421 if (AR_SREV_9300_20_OR_LATER(ah)) {
1422 ath9k_hw_loadnf(ah, curchan);
1423 ath9k_hw_start_nfcal(ah);
1424 }
1425
2096 return 0; 1426 return 0;
2097} 1427}
2098EXPORT_SYMBOL(ath9k_hw_reset); 1428EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2379,21 +1709,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
2379/* Power Management (Chipset) */ 1709/* Power Management (Chipset) */
2380/******************************/ 1710/******************************/
2381 1711
1712/*
1713 * Notify Power Mgt is disabled in self-generated frames.
1714 * If requested, force chip to sleep.
1715 */
2382static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) 1716static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2383{ 1717{
2384 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 1718 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2385 if (setChip) { 1719 if (setChip) {
1720 /*
1721 * Clear the RTC force wake bit to allow the
1722 * mac to go to sleep.
1723 */
2386 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 1724 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2387 AR_RTC_FORCE_WAKE_EN); 1725 AR_RTC_FORCE_WAKE_EN);
2388 if (!AR_SREV_9100(ah)) 1726 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
2389 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 1727 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2390 1728
1729 /* Shutdown chip. Active low */
2391 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) 1730 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
2392 REG_CLR_BIT(ah, (AR_RTC_RESET), 1731 REG_CLR_BIT(ah, (AR_RTC_RESET),
2393 AR_RTC_RESET_EN); 1732 AR_RTC_RESET_EN);
2394 } 1733 }
2395} 1734}
2396 1735
1736/*
1737 * Notify Power Management is enabled in self-generating
1738 * frames. If request, set power mode of chip to
1739 * auto/normal. Duration in units of 128us (1/8 TU).
1740 */
2397static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) 1741static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2398{ 1742{
2399 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 1743 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2401,9 +1745,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2401 struct ath9k_hw_capabilities *pCap = &ah->caps; 1745 struct ath9k_hw_capabilities *pCap = &ah->caps;
2402 1746
2403 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1747 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1748 /* Set WakeOnInterrupt bit; clear ForceWake bit */
2404 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1749 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2405 AR_RTC_FORCE_WAKE_ON_INT); 1750 AR_RTC_FORCE_WAKE_ON_INT);
2406 } else { 1751 } else {
1752 /*
1753 * Clear the RTC force wake bit to allow the
1754 * mac to go to sleep.
1755 */
2407 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 1756 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2408 AR_RTC_FORCE_WAKE_EN); 1757 AR_RTC_FORCE_WAKE_EN);
2409 } 1758 }
@@ -2422,7 +1771,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2422 ATH9K_RESET_POWER_ON) != true) { 1771 ATH9K_RESET_POWER_ON) != true) {
2423 return false; 1772 return false;
2424 } 1773 }
2425 ath9k_hw_init_pll(ah, NULL); 1774 if (!AR_SREV_9300_20_OR_LATER(ah))
1775 ath9k_hw_init_pll(ah, NULL);
2426 } 1776 }
2427 if (AR_SREV_9100(ah)) 1777 if (AR_SREV_9100(ah))
2428 REG_SET_BIT(ah, AR_RTC_RESET, 1778 REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2492,420 +1842,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2492} 1842}
2493EXPORT_SYMBOL(ath9k_hw_setpower); 1843EXPORT_SYMBOL(ath9k_hw_setpower);
2494 1844
2495/*
2496 * Helper for ASPM support.
2497 *
2498 * Disable PLL when in L0s as well as receiver clock when in L1.
2499 * This power saving option must be enabled through the SerDes.
2500 *
2501 * Programming the SerDes must go through the same 288 bit serial shift
2502 * register as the other analog registers. Hence the 9 writes.
2503 */
2504void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
2505{
2506 u8 i;
2507 u32 val;
2508
2509 if (ah->is_pciexpress != true)
2510 return;
2511
2512 /* Do not touch SerDes registers */
2513 if (ah->config.pcie_powersave_enable == 2)
2514 return;
2515
2516 /* Nothing to do on restore for 11N */
2517 if (!restore) {
2518 if (AR_SREV_9280_20_OR_LATER(ah)) {
2519 /*
2520 * AR9280 2.0 or later chips use SerDes values from the
2521 * initvals.h initialized depending on chipset during
2522 * ath9k_hw_init()
2523 */
2524 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
2525 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
2526 INI_RA(&ah->iniPcieSerdes, i, 1));
2527 }
2528 } else if (AR_SREV_9280(ah) &&
2529 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
2530 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
2531 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2532
2533 /* RX shut off when elecidle is asserted */
2534 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
2535 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
2536 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
2537
2538 /* Shut off CLKREQ active in L1 */
2539 if (ah->config.pcie_clock_req)
2540 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
2541 else
2542 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
2543
2544 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2545 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2546 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
2547
2548 /* Load the new settings */
2549 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2550
2551 } else {
2552 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
2553 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2554
2555 /* RX shut off when elecidle is asserted */
2556 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
2557 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
2558 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
2559
2560 /*
2561 * Ignore ah->ah_config.pcie_clock_req setting for
2562 * pre-AR9280 11n
2563 */
2564 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
2565
2566 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2567 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2568 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
2569
2570 /* Load the new settings */
2571 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2572 }
2573
2574 udelay(1000);
2575
2576 /* set bit 19 to allow forcing of pcie core into L1 state */
2577 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
2578
2579 /* Several PCIe massages to ensure proper behaviour */
2580 if (ah->config.pcie_waen) {
2581 val = ah->config.pcie_waen;
2582 if (!power_off)
2583 val &= (~AR_WA_D3_L1_DISABLE);
2584 } else {
2585 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2586 AR_SREV_9287(ah)) {
2587 val = AR9285_WA_DEFAULT;
2588 if (!power_off)
2589 val &= (~AR_WA_D3_L1_DISABLE);
2590 } else if (AR_SREV_9280(ah)) {
2591 /*
2592 * On AR9280 chips bit 22 of 0x4004 needs to be
2593 * set otherwise card may disappear.
2594 */
2595 val = AR9280_WA_DEFAULT;
2596 if (!power_off)
2597 val &= (~AR_WA_D3_L1_DISABLE);
2598 } else
2599 val = AR_WA_DEFAULT;
2600 }
2601
2602 REG_WRITE(ah, AR_WA, val);
2603 }
2604
2605 if (power_off) {
2606 /*
2607 * Set PCIe workaround bits
2608 * bit 14 in WA register (disable L1) should only
2609 * be set when device enters D3 and be cleared
2610 * when device comes back to D0.
2611 */
2612 if (ah->config.pcie_waen) {
2613 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
2614 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2615 } else {
2616 if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2617 AR_SREV_9287(ah)) &&
2618 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
2619 (AR_SREV_9280(ah) &&
2620 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
2621 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2622 }
2623 }
2624 }
2625}
2626EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
2627
2628/**********************/
2629/* Interrupt Handling */
2630/**********************/
2631
2632bool ath9k_hw_intrpend(struct ath_hw *ah)
2633{
2634 u32 host_isr;
2635
2636 if (AR_SREV_9100(ah))
2637 return true;
2638
2639 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
2640 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
2641 return true;
2642
2643 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
2644 if ((host_isr & AR_INTR_SYNC_DEFAULT)
2645 && (host_isr != AR_INTR_SPURIOUS))
2646 return true;
2647
2648 return false;
2649}
2650EXPORT_SYMBOL(ath9k_hw_intrpend);
2651
2652bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2653{
2654 u32 isr = 0;
2655 u32 mask2 = 0;
2656 struct ath9k_hw_capabilities *pCap = &ah->caps;
2657 u32 sync_cause = 0;
2658 bool fatal_int = false;
2659 struct ath_common *common = ath9k_hw_common(ah);
2660
2661 if (!AR_SREV_9100(ah)) {
2662 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
2663 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
2664 == AR_RTC_STATUS_ON) {
2665 isr = REG_READ(ah, AR_ISR);
2666 }
2667 }
2668
2669 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
2670 AR_INTR_SYNC_DEFAULT;
2671
2672 *masked = 0;
2673
2674 if (!isr && !sync_cause)
2675 return false;
2676 } else {
2677 *masked = 0;
2678 isr = REG_READ(ah, AR_ISR);
2679 }
2680
2681 if (isr) {
2682 if (isr & AR_ISR_BCNMISC) {
2683 u32 isr2;
2684 isr2 = REG_READ(ah, AR_ISR_S2);
2685 if (isr2 & AR_ISR_S2_TIM)
2686 mask2 |= ATH9K_INT_TIM;
2687 if (isr2 & AR_ISR_S2_DTIM)
2688 mask2 |= ATH9K_INT_DTIM;
2689 if (isr2 & AR_ISR_S2_DTIMSYNC)
2690 mask2 |= ATH9K_INT_DTIMSYNC;
2691 if (isr2 & (AR_ISR_S2_CABEND))
2692 mask2 |= ATH9K_INT_CABEND;
2693 if (isr2 & AR_ISR_S2_GTT)
2694 mask2 |= ATH9K_INT_GTT;
2695 if (isr2 & AR_ISR_S2_CST)
2696 mask2 |= ATH9K_INT_CST;
2697 if (isr2 & AR_ISR_S2_TSFOOR)
2698 mask2 |= ATH9K_INT_TSFOOR;
2699 }
2700
2701 isr = REG_READ(ah, AR_ISR_RAC);
2702 if (isr == 0xffffffff) {
2703 *masked = 0;
2704 return false;
2705 }
2706
2707 *masked = isr & ATH9K_INT_COMMON;
2708
2709 if (ah->config.rx_intr_mitigation) {
2710 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2711 *masked |= ATH9K_INT_RX;
2712 }
2713
2714 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
2715 *masked |= ATH9K_INT_RX;
2716 if (isr &
2717 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
2718 AR_ISR_TXEOL)) {
2719 u32 s0_s, s1_s;
2720
2721 *masked |= ATH9K_INT_TX;
2722
2723 s0_s = REG_READ(ah, AR_ISR_S0_S);
2724 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
2725 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
2726
2727 s1_s = REG_READ(ah, AR_ISR_S1_S);
2728 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
2729 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
2730 }
2731
2732 if (isr & AR_ISR_RXORN) {
2733 ath_print(common, ATH_DBG_INTERRUPT,
2734 "receive FIFO overrun interrupt\n");
2735 }
2736
2737 if (!AR_SREV_9100(ah)) {
2738 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2739 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
2740 if (isr5 & AR_ISR_S5_TIM_TIMER)
2741 *masked |= ATH9K_INT_TIM_TIMER;
2742 }
2743 }
2744
2745 *masked |= mask2;
2746 }
2747
2748 if (AR_SREV_9100(ah))
2749 return true;
2750
2751 if (isr & AR_ISR_GENTMR) {
2752 u32 s5_s;
2753
2754 s5_s = REG_READ(ah, AR_ISR_S5_S);
2755 if (isr & AR_ISR_GENTMR) {
2756 ah->intr_gen_timer_trigger =
2757 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
2758
2759 ah->intr_gen_timer_thresh =
2760 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
2761
2762 if (ah->intr_gen_timer_trigger)
2763 *masked |= ATH9K_INT_GENTIMER;
2764
2765 }
2766 }
2767
2768 if (sync_cause) {
2769 fatal_int =
2770 (sync_cause &
2771 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
2772 ? true : false;
2773
2774 if (fatal_int) {
2775 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
2776 ath_print(common, ATH_DBG_ANY,
2777 "received PCI FATAL interrupt\n");
2778 }
2779 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
2780 ath_print(common, ATH_DBG_ANY,
2781 "received PCI PERR interrupt\n");
2782 }
2783 *masked |= ATH9K_INT_FATAL;
2784 }
2785 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
2786 ath_print(common, ATH_DBG_INTERRUPT,
2787 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
2788 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
2789 REG_WRITE(ah, AR_RC, 0);
2790 *masked |= ATH9K_INT_FATAL;
2791 }
2792 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
2793 ath_print(common, ATH_DBG_INTERRUPT,
2794 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
2795 }
2796
2797 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
2798 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
2799 }
2800
2801 return true;
2802}
2803EXPORT_SYMBOL(ath9k_hw_getisr);
2804
2805enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2806{
2807 enum ath9k_int omask = ah->imask;
2808 u32 mask, mask2;
2809 struct ath9k_hw_capabilities *pCap = &ah->caps;
2810 struct ath_common *common = ath9k_hw_common(ah);
2811
2812 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
2813
2814 if (omask & ATH9K_INT_GLOBAL) {
2815 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
2816 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
2817 (void) REG_READ(ah, AR_IER);
2818 if (!AR_SREV_9100(ah)) {
2819 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
2820 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
2821
2822 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
2823 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
2824 }
2825 }
2826
2827 mask = ints & ATH9K_INT_COMMON;
2828 mask2 = 0;
2829
2830 if (ints & ATH9K_INT_TX) {
2831 if (ah->txok_interrupt_mask)
2832 mask |= AR_IMR_TXOK;
2833 if (ah->txdesc_interrupt_mask)
2834 mask |= AR_IMR_TXDESC;
2835 if (ah->txerr_interrupt_mask)
2836 mask |= AR_IMR_TXERR;
2837 if (ah->txeol_interrupt_mask)
2838 mask |= AR_IMR_TXEOL;
2839 }
2840 if (ints & ATH9K_INT_RX) {
2841 mask |= AR_IMR_RXERR;
2842 if (ah->config.rx_intr_mitigation)
2843 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2844 else
2845 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
2846 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
2847 mask |= AR_IMR_GENTMR;
2848 }
2849
2850 if (ints & (ATH9K_INT_BMISC)) {
2851 mask |= AR_IMR_BCNMISC;
2852 if (ints & ATH9K_INT_TIM)
2853 mask2 |= AR_IMR_S2_TIM;
2854 if (ints & ATH9K_INT_DTIM)
2855 mask2 |= AR_IMR_S2_DTIM;
2856 if (ints & ATH9K_INT_DTIMSYNC)
2857 mask2 |= AR_IMR_S2_DTIMSYNC;
2858 if (ints & ATH9K_INT_CABEND)
2859 mask2 |= AR_IMR_S2_CABEND;
2860 if (ints & ATH9K_INT_TSFOOR)
2861 mask2 |= AR_IMR_S2_TSFOOR;
2862 }
2863
2864 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
2865 mask |= AR_IMR_BCNMISC;
2866 if (ints & ATH9K_INT_GTT)
2867 mask2 |= AR_IMR_S2_GTT;
2868 if (ints & ATH9K_INT_CST)
2869 mask2 |= AR_IMR_S2_CST;
2870 }
2871
2872 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
2873 REG_WRITE(ah, AR_IMR, mask);
2874 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
2875 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
2876 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
2877 ah->imrs2_reg |= mask2;
2878 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
2879
2880 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2881 if (ints & ATH9K_INT_TIM_TIMER)
2882 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2883 else
2884 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2885 }
2886
2887 if (ints & ATH9K_INT_GLOBAL) {
2888 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
2889 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
2890 if (!AR_SREV_9100(ah)) {
2891 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
2892 AR_INTR_MAC_IRQ);
2893 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
2894
2895
2896 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
2897 AR_INTR_SYNC_DEFAULT);
2898 REG_WRITE(ah, AR_INTR_SYNC_MASK,
2899 AR_INTR_SYNC_DEFAULT);
2900 }
2901 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
2902 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
2903 }
2904
2905 return omask;
2906}
2907EXPORT_SYMBOL(ath9k_hw_set_interrupts);
2908
2909/*******************/ 1845/*******************/
2910/* Beacon Handling */ 1846/* Beacon Handling */
2911/*******************/ 1847/*******************/
@@ -2916,6 +1852,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
2916 1852
2917 ah->beacon_interval = beacon_period; 1853 ah->beacon_interval = beacon_period;
2918 1854
1855 ENABLE_REGWRITE_BUFFER(ah);
1856
2919 switch (ah->opmode) { 1857 switch (ah->opmode) {
2920 case NL80211_IFTYPE_STATION: 1858 case NL80211_IFTYPE_STATION:
2921 case NL80211_IFTYPE_MONITOR: 1859 case NL80211_IFTYPE_MONITOR:
@@ -2959,6 +1897,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
2959 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); 1897 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
2960 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); 1898 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
2961 1899
1900 REGWRITE_BUFFER_FLUSH(ah);
1901 DISABLE_REGWRITE_BUFFER(ah);
1902
2962 beacon_period &= ~ATH9K_BEACON_ENA; 1903 beacon_period &= ~ATH9K_BEACON_ENA;
2963 if (beacon_period & ATH9K_BEACON_RESET_TSF) { 1904 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
2964 ath9k_hw_reset_tsf(ah); 1905 ath9k_hw_reset_tsf(ah);
@@ -2975,6 +1916,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
2975 struct ath9k_hw_capabilities *pCap = &ah->caps; 1916 struct ath9k_hw_capabilities *pCap = &ah->caps;
2976 struct ath_common *common = ath9k_hw_common(ah); 1917 struct ath_common *common = ath9k_hw_common(ah);
2977 1918
1919 ENABLE_REGWRITE_BUFFER(ah);
1920
2978 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); 1921 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
2979 1922
2980 REG_WRITE(ah, AR_BEACON_PERIOD, 1923 REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -2982,6 +1925,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
2982 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, 1925 REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
2983 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); 1926 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
2984 1927
1928 REGWRITE_BUFFER_FLUSH(ah);
1929 DISABLE_REGWRITE_BUFFER(ah);
1930
2985 REG_RMW_FIELD(ah, AR_RSSI_THR, 1931 REG_RMW_FIELD(ah, AR_RSSI_THR,
2986 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); 1932 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
2987 1933
@@ -3004,6 +1950,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3004 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); 1950 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
3005 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); 1951 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
3006 1952
1953 ENABLE_REGWRITE_BUFFER(ah);
1954
3007 REG_WRITE(ah, AR_NEXT_DTIM, 1955 REG_WRITE(ah, AR_NEXT_DTIM,
3008 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); 1956 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
3009 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP)); 1957 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3023,6 +1971,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3023 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval)); 1971 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
3024 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); 1972 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
3025 1973
1974 REGWRITE_BUFFER_FLUSH(ah);
1975 DISABLE_REGWRITE_BUFFER(ah);
1976
3026 REG_SET_BIT(ah, AR_TIMER_MODE, 1977 REG_SET_BIT(ah, AR_TIMER_MODE,
3027 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | 1978 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
3028 AR_DTIM_TIMER_EN); 1979 AR_DTIM_TIMER_EN);
@@ -3241,6 +2192,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3241 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE; 2192 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
3242 } 2193 }
3243 2194
2195 if (AR_SREV_9300_20_OR_LATER(ah)) {
2196 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
2197 ATH9K_HW_CAP_FASTCLOCK;
2198 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
2199 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
2200 pCap->rx_status_len = sizeof(struct ar9003_rxs);
2201 pCap->tx_desc_len = sizeof(struct ar9003_txc);
2202 pCap->txs_len = sizeof(struct ar9003_txs);
2203 } else {
2204 pCap->tx_desc_len = sizeof(struct ath_desc);
2205 if (AR_SREV_9280_20(ah) &&
2206 ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
2207 AR5416_EEP_MINOR_VER_16) ||
2208 ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
2209 pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
2210 }
2211
2212 if (AR_SREV_9300_20_OR_LATER(ah))
2213 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
2214
3244 return 0; 2215 return 0;
3245} 2216}
3246 2217
@@ -3273,10 +2244,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3273 case ATH9K_CAP_TKIP_SPLIT: 2244 case ATH9K_CAP_TKIP_SPLIT:
3274 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ? 2245 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
3275 false : true; 2246 false : true;
3276 case ATH9K_CAP_DIVERSITY:
3277 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
3278 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
3279 true : false;
3280 case ATH9K_CAP_MCAST_KEYSRCH: 2247 case ATH9K_CAP_MCAST_KEYSRCH:
3281 switch (capability) { 2248 switch (capability) {
3282 case 0: 2249 case 0:
@@ -3319,8 +2286,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
3319bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 2286bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3320 u32 capability, u32 setting, int *status) 2287 u32 capability, u32 setting, int *status)
3321{ 2288{
3322 u32 v;
3323
3324 switch (type) { 2289 switch (type) {
3325 case ATH9K_CAP_TKIP_MIC: 2290 case ATH9K_CAP_TKIP_MIC:
3326 if (setting) 2291 if (setting)
@@ -3330,14 +2295,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3330 ah->sta_id1_defaults &= 2295 ah->sta_id1_defaults &=
3331 ~AR_STA_ID1_CRPT_MIC_ENABLE; 2296 ~AR_STA_ID1_CRPT_MIC_ENABLE;
3332 return true; 2297 return true;
3333 case ATH9K_CAP_DIVERSITY:
3334 v = REG_READ(ah, AR_PHY_CCK_DETECT);
3335 if (setting)
3336 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
3337 else
3338 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
3339 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
3340 return true;
3341 case ATH9K_CAP_MCAST_KEYSRCH: 2298 case ATH9K_CAP_MCAST_KEYSRCH:
3342 if (setting) 2299 if (setting)
3343 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH; 2300 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3405,7 +2362,9 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3405 if (gpio >= ah->caps.num_gpio_pins) 2362 if (gpio >= ah->caps.num_gpio_pins)
3406 return 0xffffffff; 2363 return 0xffffffff;
3407 2364
3408 if (AR_SREV_9271(ah)) 2365 if (AR_SREV_9300_20_OR_LATER(ah))
2366 return MS_REG_READ(AR9300, gpio) != 0;
2367 else if (AR_SREV_9271(ah))
3409 return MS_REG_READ(AR9271, gpio) != 0; 2368 return MS_REG_READ(AR9271, gpio) != 0;
3410 else if (AR_SREV_9287_10_OR_LATER(ah)) 2369 else if (AR_SREV_9287_10_OR_LATER(ah))
3411 return MS_REG_READ(AR9287, gpio) != 0; 2370 return MS_REG_READ(AR9287, gpio) != 0;
@@ -3478,6 +2437,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3478{ 2437{
3479 u32 phybits; 2438 u32 phybits;
3480 2439
2440 ENABLE_REGWRITE_BUFFER(ah);
2441
3481 REG_WRITE(ah, AR_RX_FILTER, bits); 2442 REG_WRITE(ah, AR_RX_FILTER, bits);
3482 2443
3483 phybits = 0; 2444 phybits = 0;
@@ -3493,6 +2454,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3493 else 2454 else
3494 REG_WRITE(ah, AR_RXCFG, 2455 REG_WRITE(ah, AR_RXCFG,
3495 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 2456 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
2457
2458 REGWRITE_BUFFER_FLUSH(ah);
2459 DISABLE_REGWRITE_BUFFER(ah);
3496} 2460}
3497EXPORT_SYMBOL(ath9k_hw_setrxfilter); 2461EXPORT_SYMBOL(ath9k_hw_setrxfilter);
3498 2462
@@ -3565,14 +2529,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
3565} 2529}
3566EXPORT_SYMBOL(ath9k_hw_write_associd); 2530EXPORT_SYMBOL(ath9k_hw_write_associd);
3567 2531
2532#define ATH9K_MAX_TSF_READ 10
2533
3568u64 ath9k_hw_gettsf64(struct ath_hw *ah) 2534u64 ath9k_hw_gettsf64(struct ath_hw *ah)
3569{ 2535{
3570 u64 tsf; 2536 u32 tsf_lower, tsf_upper1, tsf_upper2;
2537 int i;
2538
2539 tsf_upper1 = REG_READ(ah, AR_TSF_U32);
2540 for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
2541 tsf_lower = REG_READ(ah, AR_TSF_L32);
2542 tsf_upper2 = REG_READ(ah, AR_TSF_U32);
2543 if (tsf_upper2 == tsf_upper1)
2544 break;
2545 tsf_upper1 = tsf_upper2;
2546 }
3571 2547
3572 tsf = REG_READ(ah, AR_TSF_U32); 2548 WARN_ON( i == ATH9K_MAX_TSF_READ );
3573 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
3574 2549
3575 return tsf; 2550 return (((u64)tsf_upper1 << 32) | tsf_lower);
3576} 2551}
3577EXPORT_SYMBOL(ath9k_hw_gettsf64); 2552EXPORT_SYMBOL(ath9k_hw_gettsf64);
3578 2553
@@ -3847,6 +2822,7 @@ static struct {
3847 { AR_SREV_VERSION_9285, "9285" }, 2822 { AR_SREV_VERSION_9285, "9285" },
3848 { AR_SREV_VERSION_9287, "9287" }, 2823 { AR_SREV_VERSION_9287, "9287" },
3849 { AR_SREV_VERSION_9271, "9271" }, 2824 { AR_SREV_VERSION_9271, "9271" },
2825 { AR_SREV_VERSION_9300, "9300" },
3850}; 2826};
3851 2827
3852/* For devices with external radios */ 2828/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index f4821cf33b87..77245dff5993 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -41,6 +41,9 @@
41#define AR9280_DEVID_PCIE 0x002a 41#define AR9280_DEVID_PCIE 0x002a
42#define AR9285_DEVID_PCIE 0x002b 42#define AR9285_DEVID_PCIE 0x002b
43#define AR2427_DEVID_PCIE 0x002c 43#define AR2427_DEVID_PCIE 0x002c
44#define AR9287_DEVID_PCI 0x002d
45#define AR9287_DEVID_PCIE 0x002e
46#define AR9300_DEVID_PCIE 0x0030
44 47
45#define AR5416_AR9100_DEVID 0x000b 48#define AR5416_AR9100_DEVID 0x000b
46 49
@@ -48,9 +51,6 @@
48#define AR_SUBVENDOR_ID_NEW_A 0x7065 51#define AR_SUBVENDOR_ID_NEW_A 0x7065
49#define AR5416_MAGIC 0x19641014 52#define AR5416_MAGIC 0x19641014
50 53
51#define AR5416_DEVID_AR9287_PCI 0x002D
52#define AR5416_DEVID_AR9287_PCIE 0x002E
53
54#define AR9280_COEX2WIRE_SUBSYSID 0x309b 54#define AR9280_COEX2WIRE_SUBSYSID 0x309b
55#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 55#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
56#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 56#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -68,6 +68,24 @@
68#define REG_READ(_ah, _reg) \ 68#define REG_READ(_ah, _reg) \
69 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 69 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
70 70
71#define ENABLE_REGWRITE_BUFFER(_ah) \
72 do { \
73 if (AR_SREV_9271(_ah)) \
74 ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
75 } while (0)
76
77#define DISABLE_REGWRITE_BUFFER(_ah) \
78 do { \
79 if (AR_SREV_9271(_ah)) \
80 ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
81 } while (0)
82
83#define REGWRITE_BUFFER_FLUSH(_ah) \
84 do { \
85 if (AR_SREV_9271(_ah)) \
86 ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
87 } while (0)
88
71#define SM(_v, _f) (((_v) << _f##_S) & _f) 89#define SM(_v, _f) (((_v) << _f##_S) & _f)
72#define MS(_v, _f) (((_v) & _f) >> _f##_S) 90#define MS(_v, _f) (((_v) & _f) >> _f##_S)
73#define REG_RMW(_a, _r, _set, _clr) \ 91#define REG_RMW(_a, _r, _set, _clr) \
@@ -75,6 +93,8 @@
75#define REG_RMW_FIELD(_a, _r, _f, _v) \ 93#define REG_RMW_FIELD(_a, _r, _f, _v) \
76 REG_WRITE(_a, _r, \ 94 REG_WRITE(_a, _r, \
77 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f)) 95 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
96#define REG_READ_FIELD(_a, _r, _f) \
97 (((REG_READ(_a, _r) & _f) >> _f##_S))
78#define REG_SET_BIT(_a, _r, _f) \ 98#define REG_SET_BIT(_a, _r, _f) \
79 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) 99 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
80#define REG_CLR_BIT(_a, _r, _f) \ 100#define REG_CLR_BIT(_a, _r, _f) \
@@ -135,6 +155,16 @@
135 155
136#define TU_TO_USEC(_tu) ((_tu) << 10) 156#define TU_TO_USEC(_tu) ((_tu) << 10)
137 157
158#define ATH9K_HW_RX_HP_QDEPTH 16
159#define ATH9K_HW_RX_LP_QDEPTH 128
160
161enum ath_ini_subsys {
162 ATH_INI_PRE = 0,
163 ATH_INI_CORE,
164 ATH_INI_POST,
165 ATH_INI_NUM_SPLIT,
166};
167
138enum wireless_mode { 168enum wireless_mode {
139 ATH9K_MODE_11A = 0, 169 ATH9K_MODE_11A = 0,
140 ATH9K_MODE_11G, 170 ATH9K_MODE_11G,
@@ -165,13 +195,16 @@ enum ath9k_hw_caps {
165 ATH9K_HW_CAP_ENHANCEDPM = BIT(14), 195 ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
166 ATH9K_HW_CAP_AUTOSLEEP = BIT(15), 196 ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
167 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16), 197 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
198 ATH9K_HW_CAP_EDMA = BIT(17),
199 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
200 ATH9K_HW_CAP_LDPC = BIT(19),
201 ATH9K_HW_CAP_FASTCLOCK = BIT(20),
168}; 202};
169 203
170enum ath9k_capability_type { 204enum ath9k_capability_type {
171 ATH9K_CAP_CIPHER = 0, 205 ATH9K_CAP_CIPHER = 0,
172 ATH9K_CAP_TKIP_MIC, 206 ATH9K_CAP_TKIP_MIC,
173 ATH9K_CAP_TKIP_SPLIT, 207 ATH9K_CAP_TKIP_SPLIT,
174 ATH9K_CAP_DIVERSITY,
175 ATH9K_CAP_TXPOW, 208 ATH9K_CAP_TXPOW,
176 ATH9K_CAP_MCAST_KEYSRCH, 209 ATH9K_CAP_MCAST_KEYSRCH,
177 ATH9K_CAP_DS 210 ATH9K_CAP_DS
@@ -192,6 +225,11 @@ struct ath9k_hw_capabilities {
192 u8 num_gpio_pins; 225 u8 num_gpio_pins;
193 u8 num_antcfg_2ghz; 226 u8 num_antcfg_2ghz;
194 u8 num_antcfg_5ghz; 227 u8 num_antcfg_5ghz;
228 u8 rx_hp_qdepth;
229 u8 rx_lp_qdepth;
230 u8 rx_status_len;
231 u8 tx_desc_len;
232 u8 txs_len;
195}; 233};
196 234
197struct ath9k_ops_config { 235struct ath9k_ops_config {
@@ -212,6 +250,7 @@ struct ath9k_ops_config {
212 u32 enable_ani; 250 u32 enable_ani;
213 int serialize_regmode; 251 int serialize_regmode;
214 bool rx_intr_mitigation; 252 bool rx_intr_mitigation;
253 bool tx_intr_mitigation;
215#define SPUR_DISABLE 0 254#define SPUR_DISABLE 0
216#define SPUR_ENABLE_IOCTL 1 255#define SPUR_ENABLE_IOCTL 1
217#define SPUR_ENABLE_EEPROM 2 256#define SPUR_ENABLE_EEPROM 2
@@ -223,6 +262,7 @@ struct ath9k_ops_config {
223#define AR_BASE_FREQ_5GHZ 4900 262#define AR_BASE_FREQ_5GHZ 4900
224#define AR_SPUR_FEEQ_BOUND_HT40 19 263#define AR_SPUR_FEEQ_BOUND_HT40 19
225#define AR_SPUR_FEEQ_BOUND_HT20 10 264#define AR_SPUR_FEEQ_BOUND_HT20 10
265 bool tx_iq_calibration; /* Only available for >= AR9003 */
226 int spurmode; 266 int spurmode;
227 u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; 267 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
228 u8 max_txtrig_level; 268 u8 max_txtrig_level;
@@ -231,6 +271,8 @@ struct ath9k_ops_config {
231enum ath9k_int { 271enum ath9k_int {
232 ATH9K_INT_RX = 0x00000001, 272 ATH9K_INT_RX = 0x00000001,
233 ATH9K_INT_RXDESC = 0x00000002, 273 ATH9K_INT_RXDESC = 0x00000002,
274 ATH9K_INT_RXHP = 0x00000001,
275 ATH9K_INT_RXLP = 0x00000002,
234 ATH9K_INT_RXNOFRM = 0x00000008, 276 ATH9K_INT_RXNOFRM = 0x00000008,
235 ATH9K_INT_RXEOL = 0x00000010, 277 ATH9K_INT_RXEOL = 0x00000010,
236 ATH9K_INT_RXORN = 0x00000020, 278 ATH9K_INT_RXORN = 0x00000020,
@@ -327,10 +369,9 @@ struct ath9k_channel {
327#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) 369#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
328#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0) 370#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
329#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) 371#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
330#define IS_CHAN_A_5MHZ_SPACED(_c) \ 372#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
331 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \ 373 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
332 (((_c)->channel % 20) != 0) && \ 374 ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
333 (((_c)->channel % 10) != 0))
334 375
335/* These macros check chanmode and not channelFlags */ 376/* These macros check chanmode and not channelFlags */
336#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B) 377#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -363,6 +404,12 @@ enum ser_reg_mode {
363 SER_REG_MODE_AUTO = 2, 404 SER_REG_MODE_AUTO = 2,
364}; 405};
365 406
407enum ath9k_rx_qtype {
408 ATH9K_RX_QUEUE_HP,
409 ATH9K_RX_QUEUE_LP,
410 ATH9K_RX_QUEUE_MAX,
411};
412
366struct ath9k_beacon_state { 413struct ath9k_beacon_state {
367 u32 bs_nexttbtt; 414 u32 bs_nexttbtt;
368 u32 bs_nextdtim; 415 u32 bs_nextdtim;
@@ -440,6 +487,124 @@ struct ath_gen_timer_table {
440 } timer_mask; 487 } timer_mask;
441}; 488};
442 489
490/**
491 * struct ath_hw_private_ops - callbacks used internally by hardware code
492 *
493 * This structure contains private callbacks designed to only be used internally
494 * by the hardware core.
495 *
496 * @init_cal_settings: setup types of calibrations supported
497 * @init_cal: starts actual calibration
498 *
499 * @init_mode_regs: Initializes mode registers
500 * @init_mode_gain_regs: Initialize TX/RX gain registers
501 * @macversion_supported: If this specific mac revision is supported
502 *
503 * @rf_set_freq: change frequency
504 * @spur_mitigate_freq: spur mitigation
505 * @rf_alloc_ext_banks:
506 * @rf_free_ext_banks:
507 * @set_rf_regs:
508 * @compute_pll_control: compute the PLL control value to use for
509 * AR_RTC_PLL_CONTROL for a given channel
510 * @setup_calibration: set up calibration
511 * @iscal_supported: used to query if a type of calibration is supported
512 * @loadnf: load noise floor read from each chain on the CCA registers
513 */
514struct ath_hw_private_ops {
515 /* Calibration ops */
516 void (*init_cal_settings)(struct ath_hw *ah);
517 bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
518
519 void (*init_mode_regs)(struct ath_hw *ah);
520 void (*init_mode_gain_regs)(struct ath_hw *ah);
521 bool (*macversion_supported)(u32 macversion);
522 void (*setup_calibration)(struct ath_hw *ah,
523 struct ath9k_cal_list *currCal);
524 bool (*iscal_supported)(struct ath_hw *ah,
525 enum ath9k_cal_types calType);
526
527 /* PHY ops */
528 int (*rf_set_freq)(struct ath_hw *ah,
529 struct ath9k_channel *chan);
530 void (*spur_mitigate_freq)(struct ath_hw *ah,
531 struct ath9k_channel *chan);
532 int (*rf_alloc_ext_banks)(struct ath_hw *ah);
533 void (*rf_free_ext_banks)(struct ath_hw *ah);
534 bool (*set_rf_regs)(struct ath_hw *ah,
535 struct ath9k_channel *chan,
536 u16 modesIndex);
537 void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
538 void (*init_bb)(struct ath_hw *ah,
539 struct ath9k_channel *chan);
540 int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
541 void (*olc_init)(struct ath_hw *ah);
542 void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
543 void (*mark_phy_inactive)(struct ath_hw *ah);
544 void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
545 bool (*rfbus_req)(struct ath_hw *ah);
546 void (*rfbus_done)(struct ath_hw *ah);
547 void (*enable_rfkill)(struct ath_hw *ah);
548 void (*restore_chainmask)(struct ath_hw *ah);
549 void (*set_diversity)(struct ath_hw *ah, bool value);
550 u32 (*compute_pll_control)(struct ath_hw *ah,
551 struct ath9k_channel *chan);
552 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
553 int param);
554 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
555 void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
556};
557
558/**
559 * struct ath_hw_ops - callbacks used by hardware code and driver code
560 *
561 * This structure contains callbacks designed to to be used internally by
562 * hardware code and also by the lower level driver.
563 *
564 * @config_pci_powersave:
565 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
566 */
567struct ath_hw_ops {
568 void (*config_pci_powersave)(struct ath_hw *ah,
569 int restore,
570 int power_off);
571 void (*rx_enable)(struct ath_hw *ah);
572 void (*set_desc_link)(void *ds, u32 link);
573 void (*get_desc_link)(void *ds, u32 **link);
574 bool (*calibrate)(struct ath_hw *ah,
575 struct ath9k_channel *chan,
576 u8 rxchainmask,
577 bool longcal);
578 bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
579 void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
580 bool is_firstseg, bool is_is_lastseg,
581 const void *ds0, dma_addr_t buf_addr,
582 unsigned int qcu);
583 int (*proc_txdesc)(struct ath_hw *ah, void *ds,
584 struct ath_tx_status *ts);
585 void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
586 u32 pktLen, enum ath9k_pkt_type type,
587 u32 txPower, u32 keyIx,
588 enum ath9k_key_type keyType,
589 u32 flags);
590 void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
591 void *lastds,
592 u32 durUpdateEn, u32 rtsctsRate,
593 u32 rtsctsDuration,
594 struct ath9k_11n_rate_series series[],
595 u32 nseries, u32 flags);
596 void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
597 u32 aggrLen);
598 void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
599 u32 numDelims);
600 void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
601 void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
602 void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
603 u32 burstDuration);
604 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
605 u32 vmf);
606};
607
443struct ath_hw { 608struct ath_hw {
444 struct ieee80211_hw *hw; 609 struct ieee80211_hw *hw;
445 struct ath_common common; 610 struct ath_common common;
@@ -453,14 +618,18 @@ struct ath_hw {
453 struct ar5416_eeprom_def def; 618 struct ar5416_eeprom_def def;
454 struct ar5416_eeprom_4k map4k; 619 struct ar5416_eeprom_4k map4k;
455 struct ar9287_eeprom map9287; 620 struct ar9287_eeprom map9287;
621 struct ar9300_eeprom ar9300_eep;
456 } eeprom; 622 } eeprom;
457 const struct eeprom_ops *eep_ops; 623 const struct eeprom_ops *eep_ops;
458 enum ath9k_eep_map eep_map;
459 624
460 bool sw_mgmt_crypto; 625 bool sw_mgmt_crypto;
461 bool is_pciexpress; 626 bool is_pciexpress;
462 bool need_an_top2_fixup; 627 bool need_an_top2_fixup;
463 u16 tx_trig_level; 628 u16 tx_trig_level;
629 s16 nf_2g_max;
630 s16 nf_2g_min;
631 s16 nf_5g_max;
632 s16 nf_5g_min;
464 u16 rfsilent; 633 u16 rfsilent;
465 u32 rfkill_gpio; 634 u32 rfkill_gpio;
466 u32 rfkill_polarity; 635 u32 rfkill_polarity;
@@ -493,6 +662,7 @@ struct ath_hw {
493 struct ath9k_cal_list adcgain_caldata; 662 struct ath9k_cal_list adcgain_caldata;
494 struct ath9k_cal_list adcdc_calinitdata; 663 struct ath9k_cal_list adcdc_calinitdata;
495 struct ath9k_cal_list adcdc_caldata; 664 struct ath9k_cal_list adcdc_caldata;
665 struct ath9k_cal_list tempCompCalData;
496 struct ath9k_cal_list *cal_list; 666 struct ath9k_cal_list *cal_list;
497 struct ath9k_cal_list *cal_list_last; 667 struct ath9k_cal_list *cal_list_last;
498 struct ath9k_cal_list *cal_list_curr; 668 struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
533 DONT_USE_32KHZ, 703 DONT_USE_32KHZ,
534 } enable_32kHz_clock; 704 } enable_32kHz_clock;
535 705
536 /* Callback for radio frequency change */ 706 /* Private to hardware code */
537 int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan); 707 struct ath_hw_private_ops private_ops;
538 708 /* Accessed by the lower level driver */
539 /* Callback for baseband spur frequency */ 709 struct ath_hw_ops ops;
540 void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
541 struct ath9k_channel *chan);
542 710
543 /* Used to program the radio on non single-chip devices */ 711 /* Used to program the radio on non single-chip devices */
544 u32 *analogBank0Data; 712 u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
551 u32 *addac5416_21; 719 u32 *addac5416_21;
552 u32 *bank6Temp; 720 u32 *bank6Temp;
553 721
722 u8 txpower_limit;
554 int16_t txpower_indexoffset; 723 int16_t txpower_indexoffset;
555 int coverage_class; 724 int coverage_class;
556 u32 beacon_interval; 725 u32 beacon_interval;
@@ -592,6 +761,7 @@ struct ath_hw {
592 struct ar5416IniArray iniBank7; 761 struct ar5416IniArray iniBank7;
593 struct ar5416IniArray iniAddac; 762 struct ar5416IniArray iniAddac;
594 struct ar5416IniArray iniPcieSerdes; 763 struct ar5416IniArray iniPcieSerdes;
764 struct ar5416IniArray iniPcieSerdesLowPower;
595 struct ar5416IniArray iniModesAdditional; 765 struct ar5416IniArray iniModesAdditional;
596 struct ar5416IniArray iniModesRxGain; 766 struct ar5416IniArray iniModesRxGain;
597 struct ar5416IniArray iniModesTxGain; 767 struct ar5416IniArray iniModesTxGain;
@@ -604,9 +774,21 @@ struct ath_hw {
604 struct ar5416IniArray iniModes_high_power_tx_gain_9271; 774 struct ar5416IniArray iniModes_high_power_tx_gain_9271;
605 struct ar5416IniArray iniModes_normal_power_tx_gain_9271; 775 struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
606 776
777 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
778 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
779 struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
780 struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
781
607 u32 intr_gen_timer_trigger; 782 u32 intr_gen_timer_trigger;
608 u32 intr_gen_timer_thresh; 783 u32 intr_gen_timer_thresh;
609 struct ath_gen_timer_table hw_gen_timers; 784 struct ath_gen_timer_table hw_gen_timers;
785
786 struct ar9003_txs *ts_ring;
787 void *ts_start;
788 u32 ts_paddr_start;
789 u32 ts_paddr_end;
790 u16 ts_tail;
791 u8 ts_size;
610}; 792};
611 793
612static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 794static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -619,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
619 return &(ath9k_hw_common(ah)->regulatory); 801 return &(ath9k_hw_common(ah)->regulatory);
620} 802}
621 803
804static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
805{
806 return &ah->private_ops;
807}
808
809static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
810{
811 return &ah->ops;
812}
813
622/* Initialization, Detach, Reset */ 814/* Initialization, Detach, Reset */
623const char *ath9k_hw_probe(u16 vendorid, u16 devid); 815const char *ath9k_hw_probe(u16 vendorid, u16 devid);
624void ath9k_hw_deinit(struct ath_hw *ah); 816void ath9k_hw_deinit(struct ath_hw *ah);
@@ -630,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
630 u32 capability, u32 *result); 822 u32 capability, u32 *result);
631bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 823bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
632 u32 capability, u32 setting, int *status); 824 u32 capability, u32 setting, int *status);
825u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
633 826
634/* Key Cache Management */ 827/* Key Cache Management */
635bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry); 828bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -678,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
678void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 871void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
679void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 872void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
680 const struct ath9k_beacon_state *bs); 873 const struct ath9k_beacon_state *bs);
874bool ath9k_hw_check_alive(struct ath_hw *ah);
681 875
682bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); 876bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
683 877
684void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
685
686/* Interrupt Handling */
687bool ath9k_hw_intrpend(struct ath_hw *ah);
688bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
689enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
690
691/* Generic hw timer primitives */ 878/* Generic hw timer primitives */
692struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 879struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
693 void (*trigger)(void *), 880 void (*trigger)(void *),
@@ -709,6 +896,36 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
709/* HTC */ 896/* HTC */
710void ath9k_hw_htc_resetinit(struct ath_hw *ah); 897void ath9k_hw_htc_resetinit(struct ath_hw *ah);
711 898
899/* PHY */
900void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
901 u32 *coef_mantissa, u32 *coef_exponent);
902
903/*
904 * Code Specific to AR5008, AR9001 or AR9002,
905 * we stuff these here to avoid callbacks for AR9003.
906 */
907void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
908int ar9002_hw_rf_claim(struct ath_hw *ah);
909void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
910void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
911
912/*
913 * Code specifric to AR9003, we stuff these here to avoid callbacks
914 * for older families
915 */
916void ar9003_hw_set_nf_limits(struct ath_hw *ah);
917
918/* Hardware family op attach helpers */
919void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
920void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
921void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
922
923void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
924void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
925
926void ar9002_hw_attach_ops(struct ath_hw *ah);
927void ar9003_hw_attach_ops(struct ath_hw *ah);
928
712#define ATH_PCIE_CAP_LINK_CTRL 0x70 929#define ATH_PCIE_CAP_LINK_CTRL 0x70
713#define ATH_PCIE_CAP_LINK_L0S 1 930#define ATH_PCIE_CAP_LINK_L0S 1
714#define ATH_PCIE_CAP_LINK_L1 2 931#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index b78308c3c4d4..8c795488ebc3 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
175 .write = ath9k_iowrite32, 175 .write = ath9k_iowrite32,
176}; 176};
177 177
178static int count_streams(unsigned int chainmask, int max)
179{
180 int streams = 0;
181
182 do {
183 if (++streams == max)
184 break;
185 } while ((chainmask = chainmask & (chainmask - 1)));
186
187 return streams;
188}
189
178/**************************/ 190/**************************/
179/* Initialization */ 191/* Initialization */
180/**************************/ 192/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
182static void setup_ht_cap(struct ath_softc *sc, 194static void setup_ht_cap(struct ath_softc *sc,
183 struct ieee80211_sta_ht_cap *ht_info) 195 struct ieee80211_sta_ht_cap *ht_info)
184{ 196{
185 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 197 struct ath_hw *ah = sc->sc_ah;
198 struct ath_common *common = ath9k_hw_common(ah);
186 u8 tx_streams, rx_streams; 199 u8 tx_streams, rx_streams;
200 int i, max_streams;
187 201
188 ht_info->ht_supported = true; 202 ht_info->ht_supported = true;
189 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 203 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
191 IEEE80211_HT_CAP_SGI_40 | 205 IEEE80211_HT_CAP_SGI_40 |
192 IEEE80211_HT_CAP_DSSSCCK40; 206 IEEE80211_HT_CAP_DSSSCCK40;
193 207
208 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
209 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
210
194 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 211 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
195 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 212 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
196 213
214 if (AR_SREV_9300_20_OR_LATER(ah))
215 max_streams = 3;
216 else
217 max_streams = 2;
218
219 if (AR_SREV_9280_10_OR_LATER(ah)) {
220 if (max_streams >= 2)
221 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
222 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
223 }
224
197 /* set up supported mcs set */ 225 /* set up supported mcs set */
198 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 226 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
199 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ? 227 tx_streams = count_streams(common->tx_chainmask, max_streams);
200 1 : 2; 228 rx_streams = count_streams(common->rx_chainmask, max_streams);
201 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ? 229
202 1 : 2; 230 ath_print(common, ATH_DBG_CONFIG,
231 "TX streams %d, RX streams: %d\n",
232 tx_streams, rx_streams);
203 233
204 if (tx_streams != rx_streams) { 234 if (tx_streams != rx_streams) {
205 ath_print(common, ATH_DBG_CONFIG,
206 "TX streams %d, RX streams: %d\n",
207 tx_streams, rx_streams);
208 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 235 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
209 ht_info->mcs.tx_params |= ((tx_streams - 1) << 236 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
210 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 237 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
211 } 238 }
212 239
213 ht_info->mcs.rx_mask[0] = 0xff; 240 for (i = 0; i < rx_streams; i++)
214 if (rx_streams >= 2) 241 ht_info->mcs.rx_mask[i] = 0xff;
215 ht_info->mcs.rx_mask[1] = 0xff;
216 242
217 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 243 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
218} 244}
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
235*/ 261*/
236int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, 262int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
237 struct list_head *head, const char *name, 263 struct list_head *head, const char *name,
238 int nbuf, int ndesc) 264 int nbuf, int ndesc, bool is_tx)
239{ 265{
240#define DS2PHYS(_dd, _ds) \ 266#define DS2PHYS(_dd, _ds) \
241 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 267 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
242#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) 268#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
243#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) 269#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
244 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 270 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
245 struct ath_desc *ds; 271 u8 *ds;
246 struct ath_buf *bf; 272 struct ath_buf *bf;
247 int i, bsize, error; 273 int i, bsize, error, desc_len;
248 274
249 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 275 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
250 name, nbuf, ndesc); 276 name, nbuf, ndesc);
251 277
252 INIT_LIST_HEAD(head); 278 INIT_LIST_HEAD(head);
279
280 if (is_tx)
281 desc_len = sc->sc_ah->caps.tx_desc_len;
282 else
283 desc_len = sizeof(struct ath_desc);
284
253 /* ath_desc must be a multiple of DWORDs */ 285 /* ath_desc must be a multiple of DWORDs */
254 if ((sizeof(struct ath_desc) % 4) != 0) { 286 if ((desc_len % 4) != 0) {
255 ath_print(common, ATH_DBG_FATAL, 287 ath_print(common, ATH_DBG_FATAL,
256 "ath_desc not DWORD aligned\n"); 288 "ath_desc not DWORD aligned\n");
257 BUG_ON((sizeof(struct ath_desc) % 4) != 0); 289 BUG_ON((desc_len % 4) != 0);
258 error = -ENOMEM; 290 error = -ENOMEM;
259 goto fail; 291 goto fail;
260 } 292 }
261 293
262 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 294 dd->dd_desc_len = desc_len * nbuf * ndesc;
263 295
264 /* 296 /*
265 * Need additional DMA memory because we can't use 297 * Need additional DMA memory because we can't use
@@ -272,7 +304,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
272 u32 dma_len; 304 u32 dma_len;
273 305
274 while (ndesc_skipped) { 306 while (ndesc_skipped) {
275 dma_len = ndesc_skipped * sizeof(struct ath_desc); 307 dma_len = ndesc_skipped * desc_len;
276 dd->dd_desc_len += dma_len; 308 dd->dd_desc_len += dma_len;
277 309
278 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len); 310 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
286 error = -ENOMEM; 318 error = -ENOMEM;
287 goto fail; 319 goto fail;
288 } 320 }
289 ds = dd->dd_desc; 321 ds = (u8 *) dd->dd_desc;
290 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 322 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
291 name, ds, (u32) dd->dd_desc_len, 323 name, ds, (u32) dd->dd_desc_len,
292 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 324 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
300 } 332 }
301 dd->dd_bufptr = bf; 333 dd->dd_bufptr = bf;
302 334
303 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 335 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
304 bf->bf_desc = ds; 336 bf->bf_desc = ds;
305 bf->bf_daddr = DS2PHYS(dd, ds); 337 bf->bf_daddr = DS2PHYS(dd, ds);
306 338
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
316 ((caddr_t) dd->dd_desc + 348 ((caddr_t) dd->dd_desc +
317 dd->dd_desc_len)); 349 dd->dd_desc_len));
318 350
319 ds += ndesc; 351 ds += (desc_len * ndesc);
320 bf->bf_desc = ds; 352 bf->bf_desc = ds;
321 bf->bf_daddr = DS2PHYS(dd, ds); 353 bf->bf_daddr = DS2PHYS(dd, ds);
322 } 354 }
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
514 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask; 546 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
515 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask; 547 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
516 548
517 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 549 ath9k_hw_set_diversity(sc->sc_ah, true);
518 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah); 550 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
519 551
520 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 552 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
568 ath_read_cachesize(common, &csz); 600 ath_read_cachesize(common, &csz);
569 common->cachelsz = csz << 2; /* convert to bytes */ 601 common->cachelsz = csz << 2; /* convert to bytes */
570 602
603 /* Initializes the hardware for all supported chipsets */
571 ret = ath9k_hw_init(ah); 604 ret = ath9k_hw_init(ah);
572 if (ret) { 605 if (ret)
573 ath_print(common, ATH_DBG_FATAL,
574 "Unable to initialize hardware; "
575 "initialization status: %d\n", ret);
576 goto err_hw; 606 goto err_hw;
577 }
578 607
579 ret = ath9k_init_debug(ah); 608 ret = ath9k_init_debug(ah);
580 if (ret) { 609 if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 4a2060e5a777..0e425cb4bbb1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,6 +25,8 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
26 ah->txurn_interrupt_mask); 26 ah->txurn_interrupt_mask);
27 27
28 ENABLE_REGWRITE_BUFFER(ah);
29
28 REG_WRITE(ah, AR_IMR_S0, 30 REG_WRITE(ah, AR_IMR_S0,
29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 31 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
30 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); 32 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
@@ -35,6 +37,9 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
35 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN; 37 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
36 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN); 38 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
37 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 39 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
40
41 REGWRITE_BUFFER_FLUSH(ah);
42 DISABLE_REGWRITE_BUFFER(ah);
38} 43}
39 44
40u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 45u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -57,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
57} 62}
58EXPORT_SYMBOL(ath9k_hw_txstart); 63EXPORT_SYMBOL(ath9k_hw_txstart);
59 64
65void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
66{
67 struct ar5416_desc *ads = AR5416DESC(ds);
68
69 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
70 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
71 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
72 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
73 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
74}
75EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
76
60u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 77u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
61{ 78{
62 u32 npend; 79 u32 npend;
@@ -207,281 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
207} 224}
208EXPORT_SYMBOL(ath9k_hw_stoptxdma); 225EXPORT_SYMBOL(ath9k_hw_stoptxdma);
209 226
210void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
211 u32 segLen, bool firstSeg,
212 bool lastSeg, const struct ath_desc *ds0)
213{
214 struct ar5416_desc *ads = AR5416DESC(ds);
215
216 if (firstSeg) {
217 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
218 } else if (lastSeg) {
219 ads->ds_ctl0 = 0;
220 ads->ds_ctl1 = segLen;
221 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
222 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
223 } else {
224 ads->ds_ctl0 = 0;
225 ads->ds_ctl1 = segLen | AR_TxMore;
226 ads->ds_ctl2 = 0;
227 ads->ds_ctl3 = 0;
228 }
229 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
230 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
231 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
232 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
233 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
234}
235EXPORT_SYMBOL(ath9k_hw_filltxdesc);
236
237void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
238{
239 struct ar5416_desc *ads = AR5416DESC(ds);
240
241 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
242 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
243 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
244 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
245 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
246}
247EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
248
249int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
250 struct ath_tx_status *ts)
251{
252 struct ar5416_desc *ads = AR5416DESC(ds);
253
254 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
255 return -EINPROGRESS;
256
257 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
258 ts->ts_tstamp = ads->AR_SendTimestamp;
259 ts->ts_status = 0;
260 ts->ts_flags = 0;
261
262 if (ads->ds_txstatus1 & AR_FrmXmitOK)
263 ts->ts_status |= ATH9K_TX_ACKED;
264 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
265 ts->ts_status |= ATH9K_TXERR_XRETRY;
266 if (ads->ds_txstatus1 & AR_Filtered)
267 ts->ts_status |= ATH9K_TXERR_FILT;
268 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
269 ts->ts_status |= ATH9K_TXERR_FIFO;
270 ath9k_hw_updatetxtriglevel(ah, true);
271 }
272 if (ads->ds_txstatus9 & AR_TxOpExceeded)
273 ts->ts_status |= ATH9K_TXERR_XTXOP;
274 if (ads->ds_txstatus1 & AR_TxTimerExpired)
275 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
276
277 if (ads->ds_txstatus1 & AR_DescCfgErr)
278 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
279 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
280 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
281 ath9k_hw_updatetxtriglevel(ah, true);
282 }
283 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
284 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
285 ath9k_hw_updatetxtriglevel(ah, true);
286 }
287 if (ads->ds_txstatus0 & AR_TxBaStatus) {
288 ts->ts_flags |= ATH9K_TX_BA;
289 ts->ba_low = ads->AR_BaBitmapLow;
290 ts->ba_high = ads->AR_BaBitmapHigh;
291 }
292
293 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
294 switch (ts->ts_rateindex) {
295 case 0:
296 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
297 break;
298 case 1:
299 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
300 break;
301 case 2:
302 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
303 break;
304 case 3:
305 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
306 break;
307 }
308
309 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
310 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
311 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
312 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
313 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
314 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
315 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
316 ts->evm0 = ads->AR_TxEVM0;
317 ts->evm1 = ads->AR_TxEVM1;
318 ts->evm2 = ads->AR_TxEVM2;
319 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
320 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
321 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
322 ts->ts_antenna = 0;
323
324 return 0;
325}
326EXPORT_SYMBOL(ath9k_hw_txprocdesc);
327
328void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
329 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
330 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
331{
332 struct ar5416_desc *ads = AR5416DESC(ds);
333
334 txPower += ah->txpower_indexoffset;
335 if (txPower > 63)
336 txPower = 63;
337
338 ads->ds_ctl0 = (pktLen & AR_FrameLen)
339 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
340 | SM(txPower, AR_XmitPower)
341 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
342 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
343 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
344 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
345
346 ads->ds_ctl1 =
347 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
348 | SM(type, AR_FrameType)
349 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
350 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
351 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
352
353 ads->ds_ctl6 = SM(keyType, AR_EncrType);
354
355 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
356 ads->ds_ctl8 = 0;
357 ads->ds_ctl9 = 0;
358 ads->ds_ctl10 = 0;
359 ads->ds_ctl11 = 0;
360 }
361}
362EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
363
364void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
365 struct ath_desc *lastds,
366 u32 durUpdateEn, u32 rtsctsRate,
367 u32 rtsctsDuration,
368 struct ath9k_11n_rate_series series[],
369 u32 nseries, u32 flags)
370{
371 struct ar5416_desc *ads = AR5416DESC(ds);
372 struct ar5416_desc *last_ads = AR5416DESC(lastds);
373 u32 ds_ctl0;
374
375 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
376 ds_ctl0 = ads->ds_ctl0;
377
378 if (flags & ATH9K_TXDESC_RTSENA) {
379 ds_ctl0 &= ~AR_CTSEnable;
380 ds_ctl0 |= AR_RTSEnable;
381 } else {
382 ds_ctl0 &= ~AR_RTSEnable;
383 ds_ctl0 |= AR_CTSEnable;
384 }
385
386 ads->ds_ctl0 = ds_ctl0;
387 } else {
388 ads->ds_ctl0 =
389 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
390 }
391
392 ads->ds_ctl2 = set11nTries(series, 0)
393 | set11nTries(series, 1)
394 | set11nTries(series, 2)
395 | set11nTries(series, 3)
396 | (durUpdateEn ? AR_DurUpdateEna : 0)
397 | SM(0, AR_BurstDur);
398
399 ads->ds_ctl3 = set11nRate(series, 0)
400 | set11nRate(series, 1)
401 | set11nRate(series, 2)
402 | set11nRate(series, 3);
403
404 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
405 | set11nPktDurRTSCTS(series, 1);
406
407 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
408 | set11nPktDurRTSCTS(series, 3);
409
410 ads->ds_ctl7 = set11nRateFlags(series, 0)
411 | set11nRateFlags(series, 1)
412 | set11nRateFlags(series, 2)
413 | set11nRateFlags(series, 3)
414 | SM(rtsctsRate, AR_RTSCTSRate);
415 last_ads->ds_ctl2 = ads->ds_ctl2;
416 last_ads->ds_ctl3 = ads->ds_ctl3;
417}
418EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
419
420void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
421 u32 aggrLen)
422{
423 struct ar5416_desc *ads = AR5416DESC(ds);
424
425 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
426 ads->ds_ctl6 &= ~AR_AggrLen;
427 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
428}
429EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
430
431void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
432 u32 numDelims)
433{
434 struct ar5416_desc *ads = AR5416DESC(ds);
435 unsigned int ctl6;
436
437 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
438
439 ctl6 = ads->ds_ctl6;
440 ctl6 &= ~AR_PadDelim;
441 ctl6 |= SM(numDelims, AR_PadDelim);
442 ads->ds_ctl6 = ctl6;
443}
444EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
445
446void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
447{
448 struct ar5416_desc *ads = AR5416DESC(ds);
449
450 ads->ds_ctl1 |= AR_IsAggr;
451 ads->ds_ctl1 &= ~AR_MoreAggr;
452 ads->ds_ctl6 &= ~AR_PadDelim;
453}
454EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
455
456void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
457{
458 struct ar5416_desc *ads = AR5416DESC(ds);
459
460 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
461}
462EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
463
464void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
465 u32 burstDuration)
466{
467 struct ar5416_desc *ads = AR5416DESC(ds);
468
469 ads->ds_ctl2 &= ~AR_BurstDur;
470 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
471}
472EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
473
474void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
475 u32 vmf)
476{
477 struct ar5416_desc *ads = AR5416DESC(ds);
478
479 if (vmf)
480 ads->ds_ctl0 |= AR_VirtMoreFrag;
481 else
482 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
483}
484
485void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) 227void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
486{ 228{
487 *txqs &= ah->intr_txqs; 229 *txqs &= ah->intr_txqs;
@@ -733,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
733 } else 475 } else
734 cwMin = qi->tqi_cwmin; 476 cwMin = qi->tqi_cwmin;
735 477
478 ENABLE_REGWRITE_BUFFER(ah);
479
736 REG_WRITE(ah, AR_DLCL_IFS(q), 480 REG_WRITE(ah, AR_DLCL_IFS(q),
737 SM(cwMin, AR_D_LCL_IFS_CWMIN) | 481 SM(cwMin, AR_D_LCL_IFS_CWMIN) |
738 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | 482 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -747,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
747 REG_WRITE(ah, AR_DMISC(q), 491 REG_WRITE(ah, AR_DMISC(q),
748 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 492 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
749 493
494 REGWRITE_BUFFER_FLUSH(ah);
495
750 if (qi->tqi_cbrPeriod) { 496 if (qi->tqi_cbrPeriod) {
751 REG_WRITE(ah, AR_QCBRCFG(q), 497 REG_WRITE(ah, AR_QCBRCFG(q),
752 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 498 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -762,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
762 AR_Q_RDYTIMECFG_EN); 508 AR_Q_RDYTIMECFG_EN);
763 } 509 }
764 510
511 REGWRITE_BUFFER_FLUSH(ah);
512
765 REG_WRITE(ah, AR_DCHNTIME(q), 513 REG_WRITE(ah, AR_DCHNTIME(q),
766 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | 514 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
767 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 515 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -779,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
779 REG_READ(ah, AR_DMISC(q)) | 527 REG_READ(ah, AR_DMISC(q)) |
780 AR_D_MISC_POST_FR_BKOFF_DIS); 528 AR_D_MISC_POST_FR_BKOFF_DIS);
781 } 529 }
530
531 REGWRITE_BUFFER_FLUSH(ah);
532 DISABLE_REGWRITE_BUFFER(ah);
533
782 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 534 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
783 REG_WRITE(ah, AR_DMISC(q), 535 REG_WRITE(ah, AR_DMISC(q),
784 REG_READ(ah, AR_DMISC(q)) | 536 REG_READ(ah, AR_DMISC(q)) |
@@ -786,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
786 } 538 }
787 switch (qi->tqi_type) { 539 switch (qi->tqi_type) {
788 case ATH9K_TX_QUEUE_BEACON: 540 case ATH9K_TX_QUEUE_BEACON:
541 ENABLE_REGWRITE_BUFFER(ah);
542
789 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 543 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
790 | AR_Q_MISC_FSP_DBA_GATED 544 | AR_Q_MISC_FSP_DBA_GATED
791 | AR_Q_MISC_BEACON_USE 545 | AR_Q_MISC_BEACON_USE
@@ -796,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
796 AR_D_MISC_ARB_LOCKOUT_CNTRL_S) 550 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
797 | AR_D_MISC_BEACON_USE 551 | AR_D_MISC_BEACON_USE
798 | AR_D_MISC_POST_FR_BKOFF_DIS); 552 | AR_D_MISC_POST_FR_BKOFF_DIS);
553
554 REGWRITE_BUFFER_FLUSH(ah);
555 DISABLE_REGWRITE_BUFFER(ah);
556
557 /* cwmin and cwmax should be 0 for beacon queue */
558 if (AR_SREV_9300_20_OR_LATER(ah)) {
559 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
560 | SM(0, AR_D_LCL_IFS_CWMAX)
561 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
562 }
799 break; 563 break;
800 case ATH9K_TX_QUEUE_CAB: 564 case ATH9K_TX_QUEUE_CAB:
565 ENABLE_REGWRITE_BUFFER(ah);
566
801 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 567 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
802 | AR_Q_MISC_FSP_DBA_GATED 568 | AR_Q_MISC_FSP_DBA_GATED
803 | AR_Q_MISC_CBR_INCR_DIS1 569 | AR_Q_MISC_CBR_INCR_DIS1
@@ -811,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
811 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 577 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
812 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 578 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
813 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 579 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
580
581 REGWRITE_BUFFER_FLUSH(ah);
582 DISABLE_REGWRITE_BUFFER(ah);
583
814 break; 584 break;
815 case ATH9K_TX_QUEUE_PSPOLL: 585 case ATH9K_TX_QUEUE_PSPOLL:
816 REG_WRITE(ah, AR_QMISC(q), 586 REG_WRITE(ah, AR_QMISC(q),
@@ -832,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
832 AR_D_MISC_POST_FR_BKOFF_DIS); 602 AR_D_MISC_POST_FR_BKOFF_DIS);
833 } 603 }
834 604
605 if (AR_SREV_9300_20_OR_LATER(ah))
606 REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
607
835 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) 608 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
836 ah->txok_interrupt_mask |= 1 << q; 609 ah->txok_interrupt_mask |= 1 << q;
837 else 610 else
@@ -940,22 +713,6 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
940} 713}
941EXPORT_SYMBOL(ath9k_hw_rxprocdesc); 714EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
942 715
943void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
944 u32 size, u32 flags)
945{
946 struct ar5416_desc *ads = AR5416DESC(ds);
947 struct ath9k_hw_capabilities *pCap = &ah->caps;
948
949 ads->ds_ctl1 = size & AR_BufLen;
950 if (flags & ATH9K_RXDESC_INTREQ)
951 ads->ds_ctl1 |= AR_RxIntrReq;
952
953 ads->ds_rxstatus8 &= ~AR_RxDone;
954 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
955 memset(&(ads->u), 0, sizeof(ads->u));
956}
957EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
958
959/* 716/*
960 * This can stop or re-enables RX. 717 * This can stop or re-enables RX.
961 * 718 *
@@ -999,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
999} 756}
1000EXPORT_SYMBOL(ath9k_hw_putrxbuf); 757EXPORT_SYMBOL(ath9k_hw_putrxbuf);
1001 758
1002void ath9k_hw_rxena(struct ath_hw *ah)
1003{
1004 REG_WRITE(ah, AR_CR, AR_CR_RXE);
1005}
1006EXPORT_SYMBOL(ath9k_hw_rxena);
1007
1008void ath9k_hw_startpcureceive(struct ath_hw *ah) 759void ath9k_hw_startpcureceive(struct ath_hw *ah)
1009{ 760{
1010 ath9k_enable_mib_counters(ah); 761 ath9k_enable_mib_counters(ah);
@@ -1023,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
1023} 774}
1024EXPORT_SYMBOL(ath9k_hw_stoppcurecv); 775EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
1025 776
777void ath9k_hw_abortpcurecv(struct ath_hw *ah)
778{
779 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
780
781 ath9k_hw_disable_mib_counters(ah);
782}
783EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
784
1026bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 785bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
1027{ 786{
1028#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 787#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1068,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
1068 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 827 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
1069} 828}
1070EXPORT_SYMBOL(ath9k_hw_beaconq_setup); 829EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
830
831bool ath9k_hw_intrpend(struct ath_hw *ah)
832{
833 u32 host_isr;
834
835 if (AR_SREV_9100(ah))
836 return true;
837
838 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
839 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
840 return true;
841
842 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
843 if ((host_isr & AR_INTR_SYNC_DEFAULT)
844 && (host_isr != AR_INTR_SPURIOUS))
845 return true;
846
847 return false;
848}
849EXPORT_SYMBOL(ath9k_hw_intrpend);
850
851enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
852 enum ath9k_int ints)
853{
854 enum ath9k_int omask = ah->imask;
855 u32 mask, mask2;
856 struct ath9k_hw_capabilities *pCap = &ah->caps;
857 struct ath_common *common = ath9k_hw_common(ah);
858
859 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
860
861 if (omask & ATH9K_INT_GLOBAL) {
862 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
863 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
864 (void) REG_READ(ah, AR_IER);
865 if (!AR_SREV_9100(ah)) {
866 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
867 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
868
869 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
870 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
871 }
872 }
873
874 /* TODO: global int Ref count */
875 mask = ints & ATH9K_INT_COMMON;
876 mask2 = 0;
877
878 if (ints & ATH9K_INT_TX) {
879 if (ah->config.tx_intr_mitigation)
880 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
881 else {
882 if (ah->txok_interrupt_mask)
883 mask |= AR_IMR_TXOK;
884 if (ah->txdesc_interrupt_mask)
885 mask |= AR_IMR_TXDESC;
886 }
887 if (ah->txerr_interrupt_mask)
888 mask |= AR_IMR_TXERR;
889 if (ah->txeol_interrupt_mask)
890 mask |= AR_IMR_TXEOL;
891 }
892 if (ints & ATH9K_INT_RX) {
893 if (AR_SREV_9300_20_OR_LATER(ah)) {
894 mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
895 if (ah->config.rx_intr_mitigation) {
896 mask &= ~AR_IMR_RXOK_LP;
897 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
898 } else {
899 mask |= AR_IMR_RXOK_LP;
900 }
901 } else {
902 if (ah->config.rx_intr_mitigation)
903 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
904 else
905 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
906 }
907 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
908 mask |= AR_IMR_GENTMR;
909 }
910
911 if (ints & (ATH9K_INT_BMISC)) {
912 mask |= AR_IMR_BCNMISC;
913 if (ints & ATH9K_INT_TIM)
914 mask2 |= AR_IMR_S2_TIM;
915 if (ints & ATH9K_INT_DTIM)
916 mask2 |= AR_IMR_S2_DTIM;
917 if (ints & ATH9K_INT_DTIMSYNC)
918 mask2 |= AR_IMR_S2_DTIMSYNC;
919 if (ints & ATH9K_INT_CABEND)
920 mask2 |= AR_IMR_S2_CABEND;
921 if (ints & ATH9K_INT_TSFOOR)
922 mask2 |= AR_IMR_S2_TSFOOR;
923 }
924
925 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
926 mask |= AR_IMR_BCNMISC;
927 if (ints & ATH9K_INT_GTT)
928 mask2 |= AR_IMR_S2_GTT;
929 if (ints & ATH9K_INT_CST)
930 mask2 |= AR_IMR_S2_CST;
931 }
932
933 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
934 REG_WRITE(ah, AR_IMR, mask);
935 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
936 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
937 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
938 ah->imrs2_reg |= mask2;
939 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
940
941 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
942 if (ints & ATH9K_INT_TIM_TIMER)
943 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
944 else
945 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
946 }
947
948 if (ints & ATH9K_INT_GLOBAL) {
949 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
950 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
951 if (!AR_SREV_9100(ah)) {
952 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
953 AR_INTR_MAC_IRQ);
954 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
955
956
957 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
958 AR_INTR_SYNC_DEFAULT);
959 REG_WRITE(ah, AR_INTR_SYNC_MASK,
960 AR_INTR_SYNC_DEFAULT);
961 }
962 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
963 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
964 }
965
966 return omask;
967}
968EXPORT_SYMBOL(ath9k_hw_set_interrupts);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 68dbd7a8ddca..00f3e0c7528a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
37 AR_2040_##_index : 0) \ 37 AR_2040_##_index : 0) \
38 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ 38 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
39 AR_GI##_index : 0) \ 39 AR_GI##_index : 0) \
40 |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
41 AR_STBC##_index : 0) \
40 |SM((_series)[_index].ChSel, AR_ChainSel##_index)) 42 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
41 43
42#define CCK_SIFS_TIME 10 44#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
86#define ATH9K_TX_DESC_CFG_ERR 0x04 88#define ATH9K_TX_DESC_CFG_ERR 0x04
87#define ATH9K_TX_DATA_UNDERRUN 0x08 89#define ATH9K_TX_DATA_UNDERRUN 0x08
88#define ATH9K_TX_DELIM_UNDERRUN 0x10 90#define ATH9K_TX_DELIM_UNDERRUN 0x10
89#define ATH9K_TX_SW_ABORTED 0x40
90#define ATH9K_TX_SW_FILTERED 0x80 91#define ATH9K_TX_SW_FILTERED 0x80
91 92
92/* 64 bytes */ 93/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
117 int8_t ts_rssi_ext0; 118 int8_t ts_rssi_ext0;
118 int8_t ts_rssi_ext1; 119 int8_t ts_rssi_ext1;
119 int8_t ts_rssi_ext2; 120 int8_t ts_rssi_ext2;
120 u8 pad[3]; 121 u8 qid;
122 u16 desc_id;
123 u8 tid;
124 u8 pad[2];
121 u32 ba_low; 125 u32 ba_low;
122 u32 ba_high; 126 u32 ba_high;
123 u32 evm0; 127 u32 evm0;
@@ -148,11 +152,13 @@ struct ath_rx_status {
148 u32 evm0; 152 u32 evm0;
149 u32 evm1; 153 u32 evm1;
150 u32 evm2; 154 u32 evm2;
155 u32 evm3;
156 u32 evm4;
151}; 157};
152 158
153struct ath_htc_rx_status { 159struct ath_htc_rx_status {
154 u64 rs_tstamp; 160 __be64 rs_tstamp;
155 u16 rs_datalen; 161 __be16 rs_datalen;
156 u8 rs_status; 162 u8 rs_status;
157 u8 rs_phyerr; 163 u8 rs_phyerr;
158 int8_t rs_rssi; 164 int8_t rs_rssi;
@@ -171,9 +177,9 @@ struct ath_htc_rx_status {
171 u8 rs_num_delims; 177 u8 rs_num_delims;
172 u8 rs_flags; 178 u8 rs_flags;
173 u8 rs_dummy; 179 u8 rs_dummy;
174 u32 evm0; 180 __be32 evm0;
175 u32 evm1; 181 __be32 evm1;
176 u32 evm2; 182 __be32 evm2;
177}; 183};
178 184
179#define ATH9K_RXERR_CRC 0x01 185#define ATH9K_RXERR_CRC 0x01
@@ -259,7 +265,8 @@ struct ath_desc {
259#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 265#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
260#define ATH9K_TXDESC_VMF 0x0100 266#define ATH9K_TXDESC_VMF 0x0100
261#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 267#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
262#define ATH9K_TXDESC_CAB 0x0400 268#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
269#define ATH9K_TXDESC_LDPC 0x00010000
263 270
264#define ATH9K_RXDESC_INTREQ 0x0020 271#define ATH9K_RXDESC_INTREQ 0x0020
265 272
@@ -353,7 +360,6 @@ struct ar5416_desc {
353#define AR_DestIdxValid 0x40000000 360#define AR_DestIdxValid 0x40000000
354#define AR_CTSEnable 0x80000000 361#define AR_CTSEnable 0x80000000
355 362
356#define AR_BufLen 0x00000fff
357#define AR_TxMore 0x00001000 363#define AR_TxMore 0x00001000
358#define AR_DestIdx 0x000fe000 364#define AR_DestIdx 0x000fe000
359#define AR_DestIdx_S 13 365#define AR_DestIdx_S 13
@@ -410,6 +416,7 @@ struct ar5416_desc {
410#define AR_EncrType 0x0c000000 416#define AR_EncrType 0x0c000000
411#define AR_EncrType_S 26 417#define AR_EncrType_S 26
412#define AR_TxCtlRsvd61 0xf0000000 418#define AR_TxCtlRsvd61 0xf0000000
419#define AR_LDPC 0x80000000
413 420
414#define AR_2040_0 0x00000001 421#define AR_2040_0 0x00000001
415#define AR_GI0 0x00000002 422#define AR_GI0 0x00000002
@@ -429,7 +436,10 @@ struct ar5416_desc {
429#define AR_ChainSel3_S 17 436#define AR_ChainSel3_S 17
430#define AR_RTSCTSRate 0x0ff00000 437#define AR_RTSCTSRate 0x0ff00000
431#define AR_RTSCTSRate_S 20 438#define AR_RTSCTSRate_S 20
432#define AR_TxCtlRsvd70 0xf0000000 439#define AR_STBC0 0x10000000
440#define AR_STBC1 0x20000000
441#define AR_STBC2 0x40000000
442#define AR_STBC3 0x80000000
433 443
434#define AR_TxRSSIAnt00 0x000000ff 444#define AR_TxRSSIAnt00 0x000000ff
435#define AR_TxRSSIAnt00_S 0 445#define AR_TxRSSIAnt00_S 0
@@ -493,7 +503,6 @@ struct ar5416_desc {
493 503
494#define AR_RxCTLRsvd00 0xffffffff 504#define AR_RxCTLRsvd00 0xffffffff
495 505
496#define AR_BufLen 0x00000fff
497#define AR_RxCtlRsvd00 0x00001000 506#define AR_RxCtlRsvd00 0x00001000
498#define AR_RxIntrReq 0x00002000 507#define AR_RxIntrReq 0x00002000
499#define AR_RxCtlRsvd01 0xffffc000 508#define AR_RxCtlRsvd01 0xffffc000
@@ -643,6 +652,7 @@ enum ath9k_rx_filter {
643#define ATH9K_RATESERIES_RTS_CTS 0x0001 652#define ATH9K_RATESERIES_RTS_CTS 0x0001
644#define ATH9K_RATESERIES_2040 0x0002 653#define ATH9K_RATESERIES_2040 0x0002
645#define ATH9K_RATESERIES_HALFGI 0x0004 654#define ATH9K_RATESERIES_HALFGI 0x0004
655#define ATH9K_RATESERIES_STBC 0x0008
646 656
647struct ath9k_11n_rate_series { 657struct ath9k_11n_rate_series {
648 u32 Tries; 658 u32 Tries;
@@ -686,34 +696,10 @@ struct ath9k_channel;
686u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); 696u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
687void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); 697void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
688void ath9k_hw_txstart(struct ath_hw *ah, u32 q); 698void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
699void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
689u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); 700u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
690bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel); 701bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
691bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q); 702bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
692void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
693 u32 segLen, bool firstSeg,
694 bool lastSeg, const struct ath_desc *ds0);
695void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
696int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
697 struct ath_tx_status *ts);
698void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
699 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
700 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
701void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
702 struct ath_desc *lastds,
703 u32 durUpdateEn, u32 rtsctsRate,
704 u32 rtsctsDuration,
705 struct ath9k_11n_rate_series series[],
706 u32 nseries, u32 flags);
707void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
708 u32 aggrLen);
709void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
710 u32 numDelims);
711void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
712void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
713void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
714 u32 burstDuration);
715void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
716 u32 vmf);
717void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs); 703void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
718bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 704bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
719 const struct ath9k_tx_queue_info *qinfo); 705 const struct ath9k_tx_queue_info *qinfo);
@@ -729,10 +715,17 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
729 u32 size, u32 flags); 715 u32 size, u32 flags);
730bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 716bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
731void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 717void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
732void ath9k_hw_rxena(struct ath_hw *ah);
733void ath9k_hw_startpcureceive(struct ath_hw *ah); 718void ath9k_hw_startpcureceive(struct ath_hw *ah);
734void ath9k_hw_stoppcurecv(struct ath_hw *ah); 719void ath9k_hw_stoppcurecv(struct ath_hw *ah);
720void ath9k_hw_abortpcurecv(struct ath_hw *ah);
735bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 721bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
736int ath9k_hw_beaconq_setup(struct ath_hw *ah); 722int ath9k_hw_beaconq_setup(struct ath_hw *ah);
737 723
724/* Interrupt Handling */
725bool ath9k_hw_intrpend(struct ath_hw *ah);
726enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
727 enum ath9k_int ints);
728
729void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
730
738#endif /* MAC_H */ 731#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f7ef11407e27..893b552981a0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -401,23 +401,41 @@ void ath9k_tasklet(unsigned long data)
401 struct ath_common *common = ath9k_hw_common(ah); 401 struct ath_common *common = ath9k_hw_common(ah);
402 402
403 u32 status = sc->intrstatus; 403 u32 status = sc->intrstatus;
404 u32 rxmask;
404 405
405 ath9k_ps_wakeup(sc); 406 ath9k_ps_wakeup(sc);
406 407
407 if (status & ATH9K_INT_FATAL) { 408 if ((status & ATH9K_INT_FATAL) ||
409 !ath9k_hw_check_alive(ah)) {
408 ath_reset(sc, false); 410 ath_reset(sc, false);
409 ath9k_ps_restore(sc); 411 ath9k_ps_restore(sc);
410 return; 412 return;
411 } 413 }
412 414
413 if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) { 415 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
416 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
417 ATH9K_INT_RXORN);
418 else
419 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
420
421 if (status & rxmask) {
414 spin_lock_bh(&sc->rx.rxflushlock); 422 spin_lock_bh(&sc->rx.rxflushlock);
415 ath_rx_tasklet(sc, 0); 423
424 /* Check for high priority Rx first */
425 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
426 (status & ATH9K_INT_RXHP))
427 ath_rx_tasklet(sc, 0, true);
428
429 ath_rx_tasklet(sc, 0, false);
416 spin_unlock_bh(&sc->rx.rxflushlock); 430 spin_unlock_bh(&sc->rx.rxflushlock);
417 } 431 }
418 432
419 if (status & ATH9K_INT_TX) 433 if (status & ATH9K_INT_TX) {
420 ath_tx_tasklet(sc); 434 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
435 ath_tx_edma_tasklet(sc);
436 else
437 ath_tx_tasklet(sc);
438 }
421 439
422 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 440 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
423 /* 441 /*
@@ -445,6 +463,8 @@ irqreturn_t ath_isr(int irq, void *dev)
445 ATH9K_INT_RXORN | \ 463 ATH9K_INT_RXORN | \
446 ATH9K_INT_RXEOL | \ 464 ATH9K_INT_RXEOL | \
447 ATH9K_INT_RX | \ 465 ATH9K_INT_RX | \
466 ATH9K_INT_RXLP | \
467 ATH9K_INT_RXHP | \
448 ATH9K_INT_TX | \ 468 ATH9K_INT_TX | \
449 ATH9K_INT_BMISS | \ 469 ATH9K_INT_BMISS | \
450 ATH9K_INT_CST | \ 470 ATH9K_INT_CST | \
@@ -496,7 +516,8 @@ irqreturn_t ath_isr(int irq, void *dev)
496 * If a FATAL or RXORN interrupt is received, we have to reset the 516 * If a FATAL or RXORN interrupt is received, we have to reset the
497 * chip immediately. 517 * chip immediately.
498 */ 518 */
499 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN)) 519 if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
520 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
500 goto chip_reset; 521 goto chip_reset;
501 522
502 if (status & ATH9K_INT_SWBA) 523 if (status & ATH9K_INT_SWBA)
@@ -505,6 +526,13 @@ irqreturn_t ath_isr(int irq, void *dev)
505 if (status & ATH9K_INT_TXURN) 526 if (status & ATH9K_INT_TXURN)
506 ath9k_hw_updatetxtriglevel(ah, true); 527 ath9k_hw_updatetxtriglevel(ah, true);
507 528
529 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
530 if (status & ATH9K_INT_RXEOL) {
531 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
532 ath9k_hw_set_interrupts(ah, ah->imask);
533 }
534 }
535
508 if (status & ATH9K_INT_MIB) { 536 if (status & ATH9K_INT_MIB) {
509 /* 537 /*
510 * Disable interrupts until we service the MIB 538 * Disable interrupts until we service the MIB
@@ -724,6 +752,7 @@ static int ath_key_config(struct ath_common *common,
724 struct ath_hw *ah = common->ah; 752 struct ath_hw *ah = common->ah;
725 struct ath9k_keyval hk; 753 struct ath9k_keyval hk;
726 const u8 *mac = NULL; 754 const u8 *mac = NULL;
755 u8 gmac[ETH_ALEN];
727 int ret = 0; 756 int ret = 0;
728 int idx; 757 int idx;
729 758
@@ -747,9 +776,30 @@ static int ath_key_config(struct ath_common *common,
747 memcpy(hk.kv_val, key->key, key->keylen); 776 memcpy(hk.kv_val, key->key, key->keylen);
748 777
749 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 778 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
750 /* For now, use the default keys for broadcast keys. This may 779
751 * need to change with virtual interfaces. */ 780 if (key->ap_addr) {
752 idx = key->keyidx; 781 /*
782 * Group keys on hardware that supports multicast frame
783 * key search use a mac that is the sender's address with
784 * the high bit set instead of the app-specified address.
785 */
786 memcpy(gmac, key->ap_addr, ETH_ALEN);
787 gmac[0] |= 0x80;
788 mac = gmac;
789
790 if (key->alg == ALG_TKIP)
791 idx = ath_reserve_key_cache_slot_tkip(common);
792 else
793 idx = ath_reserve_key_cache_slot(common);
794 if (idx < 0)
795 mac = NULL; /* no free key cache entries */
796 }
797
798 if (!mac) {
799 /* For now, use the default keys for broadcast keys. This may
800 * need to change with virtual interfaces. */
801 idx = key->keyidx;
802 }
753 } else if (key->keyidx) { 803 } else if (key->keyidx) {
754 if (WARN_ON(!sta)) 804 if (WARN_ON(!sta))
755 return -EOPNOTSUPP; 805 return -EOPNOTSUPP;
@@ -1162,9 +1212,14 @@ static int ath9k_start(struct ieee80211_hw *hw)
1162 } 1212 }
1163 1213
1164 /* Setup our intr mask. */ 1214 /* Setup our intr mask. */
1165 ah->imask = ATH9K_INT_RX | ATH9K_INT_TX 1215 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
1166 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 1216 ATH9K_INT_RXORN | ATH9K_INT_FATAL |
1167 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 1217 ATH9K_INT_GLOBAL;
1218
1219 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
1220 ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
1221 else
1222 ah->imask |= ATH9K_INT_RX;
1168 1223
1169 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1224 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1170 ah->imask |= ATH9K_INT_GTT; 1225 ah->imask |= ATH9K_INT_GTT;
@@ -1436,7 +1491,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1436 if ((vif->type == NL80211_IFTYPE_STATION) || 1491 if ((vif->type == NL80211_IFTYPE_STATION) ||
1437 (vif->type == NL80211_IFTYPE_ADHOC) || 1492 (vif->type == NL80211_IFTYPE_ADHOC) ||
1438 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 1493 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
1439 ah->imask |= ATH9K_INT_MIB; 1494 if (ah->config.enable_ani)
1495 ah->imask |= ATH9K_INT_MIB;
1440 ah->imask |= ATH9K_INT_TSFOOR; 1496 ah->imask |= ATH9K_INT_TSFOOR;
1441 } 1497 }
1442 1498
@@ -1988,6 +2044,25 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1988 return ret; 2044 return ret;
1989} 2045}
1990 2046
2047static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2048 struct survey_info *survey)
2049{
2050 struct ath_wiphy *aphy = hw->priv;
2051 struct ath_softc *sc = aphy->sc;
2052 struct ath_hw *ah = sc->sc_ah;
2053 struct ath_common *common = ath9k_hw_common(ah);
2054 struct ieee80211_conf *conf = &hw->conf;
2055
2056 if (idx != 0)
2057 return -ENOENT;
2058
2059 survey->channel = conf->channel;
2060 survey->filled = SURVEY_INFO_NOISE_DBM;
2061 survey->noise = common->ani.noise_floor;
2062
2063 return 0;
2064}
2065
1991static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2066static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
1992{ 2067{
1993 struct ath_wiphy *aphy = hw->priv; 2068 struct ath_wiphy *aphy = hw->priv;
@@ -2059,6 +2134,7 @@ struct ieee80211_ops ath9k_ops = {
2059 .set_tsf = ath9k_set_tsf, 2134 .set_tsf = ath9k_set_tsf,
2060 .reset_tsf = ath9k_reset_tsf, 2135 .reset_tsf = ath9k_reset_tsf,
2061 .ampdu_action = ath9k_ampdu_action, 2136 .ampdu_action = ath9k_ampdu_action,
2137 .get_survey = ath9k_get_survey,
2062 .sw_scan_start = ath9k_sw_scan_start, 2138 .sw_scan_start = ath9k_sw_scan_start,
2063 .sw_scan_complete = ath9k_sw_scan_complete, 2139 .sw_scan_complete = ath9k_sw_scan_complete,
2064 .rfkill_poll = ath9k_rfkill_poll_state, 2140 .rfkill_poll = ath9k_rfkill_poll_state,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 1ec836cf1c0d..257b10ba6f57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
31 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
31 { 0 } 32 { 0 }
32}; 33};
33 34
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
deleted file mode 100644
index 2547b3c4a26c..000000000000
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ /dev/null
@@ -1,978 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
20 * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9287 - 11n single-band 1x1 MIMO for USB
40 */
41
42#include <linux/slab.h>
43
44#include "hw.h"
45
46/**
47 * ath9k_hw_write_regs - ??
48 *
49 * @ah: atheros hardware structure
50 * @freqIndex:
51 * @regWrites:
52 *
53 * Used for both the chipsets with an external AR2133/AR5133 radios and
54 * single-chip devices.
55 */
56void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
57{
58 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
59}
60
61/**
62 * ath9k_hw_ar9280_set_channel - set channel on single-chip device
63 * @ah: atheros hardware structure
64 * @chan:
65 *
66 * This is the function to change channel on single-chip devices, that is
67 * all devices after ar9280.
68 *
69 * This function takes the channel value in MHz and sets
70 * hardware channel value. Assumes writes have been enabled to analog bus.
71 *
72 * Actual Expression,
73 *
74 * For 2GHz channel,
75 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
76 * (freq_ref = 40MHz)
77 *
78 * For 5GHz channel,
79 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
80 * (freq_ref = 40MHz/(24>>amodeRefSel))
81 */
82int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
83{
84 u16 bMode, fracMode, aModeRefSel = 0;
85 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
86 struct chan_centers centers;
87 u32 refDivA = 24;
88
89 ath9k_hw_get_channel_centers(ah, chan, &centers);
90 freq = centers.synth_center;
91
92 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
93 reg32 &= 0xc0000000;
94
95 if (freq < 4800) { /* 2 GHz, fractional mode */
96 u32 txctl;
97 int regWrites = 0;
98
99 bMode = 1;
100 fracMode = 1;
101 aModeRefSel = 0;
102 channelSel = (freq * 0x10000) / 15;
103
104 if (AR_SREV_9287_11_OR_LATER(ah)) {
105 if (freq == 2484) {
106 /* Enable channel spreading for channel 14 */
107 REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
108 1, regWrites);
109 } else {
110 REG_WRITE_ARRAY(&ah->iniCckfirNormal,
111 1, regWrites);
112 }
113 } else {
114 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
115 if (freq == 2484) {
116 /* Enable channel spreading for channel 14 */
117 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
118 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
119 } else {
120 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
121 txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
122 }
123 }
124 } else {
125 bMode = 0;
126 fracMode = 0;
127
128 switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
129 case 0:
130 if ((freq % 20) == 0) {
131 aModeRefSel = 3;
132 } else if ((freq % 10) == 0) {
133 aModeRefSel = 2;
134 }
135 if (aModeRefSel)
136 break;
137 case 1:
138 default:
139 aModeRefSel = 0;
140 /*
141 * Enable 2G (fractional) mode for channels
142 * which are 5MHz spaced.
143 */
144 fracMode = 1;
145 refDivA = 1;
146 channelSel = (freq * 0x8000) / 15;
147
148 /* RefDivA setting */
149 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
150 AR_AN_SYNTH9_REFDIVA, refDivA);
151
152 }
153
154 if (!fracMode) {
155 ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
156 channelSel = ndiv & 0x1ff;
157 channelFrac = (ndiv & 0xfffffe00) * 2;
158 channelSel = (channelSel << 17) | channelFrac;
159 }
160 }
161
162 reg32 = reg32 |
163 (bMode << 29) |
164 (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
165
166 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
167
168 ah->curchan = chan;
169 ah->curchan_rad_index = -1;
170
171 return 0;
172}
173
174/**
175 * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
176 * @ah: atheros hardware structure
177 * @chan:
178 *
179 * For single-chip solutions. Converts to baseband spur frequency given the
180 * input channel frequency and compute register settings below.
181 */
182void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
183{
184 int bb_spur = AR_NO_SPUR;
185 int freq;
186 int bin, cur_bin;
187 int bb_spur_off, spur_subchannel_sd;
188 int spur_freq_sd;
189 int spur_delta_phase;
190 int denominator;
191 int upper, lower, cur_vit_mask;
192 int tmp, newVal;
193 int i;
194 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
195 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
196 };
197 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
198 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
199 };
200 int inc[4] = { 0, 100, 0, 0 };
201 struct chan_centers centers;
202
203 int8_t mask_m[123];
204 int8_t mask_p[123];
205 int8_t mask_amt;
206 int tmp_mask;
207 int cur_bb_spur;
208 bool is2GHz = IS_CHAN_2GHZ(chan);
209
210 memset(&mask_m, 0, sizeof(int8_t) * 123);
211 memset(&mask_p, 0, sizeof(int8_t) * 123);
212
213 ath9k_hw_get_channel_centers(ah, chan, &centers);
214 freq = centers.synth_center;
215
216 ah->config.spurmode = SPUR_ENABLE_EEPROM;
217 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
218 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
219
220 if (is2GHz)
221 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
222 else
223 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
224
225 if (AR_NO_SPUR == cur_bb_spur)
226 break;
227 cur_bb_spur = cur_bb_spur - freq;
228
229 if (IS_CHAN_HT40(chan)) {
230 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
231 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
232 bb_spur = cur_bb_spur;
233 break;
234 }
235 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
236 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
237 bb_spur = cur_bb_spur;
238 break;
239 }
240 }
241
242 if (AR_NO_SPUR == bb_spur) {
243 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
244 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
245 return;
246 } else {
247 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
248 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
249 }
250
251 bin = bb_spur * 320;
252
253 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
254
255 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
256 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
257 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
258 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
259 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
260
261 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
262 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
263 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
264 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
265 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
266 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
267
268 if (IS_CHAN_HT40(chan)) {
269 if (bb_spur < 0) {
270 spur_subchannel_sd = 1;
271 bb_spur_off = bb_spur + 10;
272 } else {
273 spur_subchannel_sd = 0;
274 bb_spur_off = bb_spur - 10;
275 }
276 } else {
277 spur_subchannel_sd = 0;
278 bb_spur_off = bb_spur;
279 }
280
281 if (IS_CHAN_HT40(chan))
282 spur_delta_phase =
283 ((bb_spur * 262144) /
284 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
285 else
286 spur_delta_phase =
287 ((bb_spur * 524288) /
288 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
289
290 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
291 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
292
293 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
294 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
295 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
296 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
297
298 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
299 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
300
301 cur_bin = -6000;
302 upper = bin + 100;
303 lower = bin - 100;
304
305 for (i = 0; i < 4; i++) {
306 int pilot_mask = 0;
307 int chan_mask = 0;
308 int bp = 0;
309 for (bp = 0; bp < 30; bp++) {
310 if ((cur_bin > lower) && (cur_bin < upper)) {
311 pilot_mask = pilot_mask | 0x1 << bp;
312 chan_mask = chan_mask | 0x1 << bp;
313 }
314 cur_bin += 100;
315 }
316 cur_bin += inc[i];
317 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
318 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
319 }
320
321 cur_vit_mask = 6100;
322 upper = bin + 120;
323 lower = bin - 120;
324
325 for (i = 0; i < 123; i++) {
326 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
327
328 /* workaround for gcc bug #37014 */
329 volatile int tmp_v = abs(cur_vit_mask - bin);
330
331 if (tmp_v < 75)
332 mask_amt = 1;
333 else
334 mask_amt = 0;
335 if (cur_vit_mask < 0)
336 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
337 else
338 mask_p[cur_vit_mask / 100] = mask_amt;
339 }
340 cur_vit_mask -= 100;
341 }
342
343 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
344 | (mask_m[48] << 26) | (mask_m[49] << 24)
345 | (mask_m[50] << 22) | (mask_m[51] << 20)
346 | (mask_m[52] << 18) | (mask_m[53] << 16)
347 | (mask_m[54] << 14) | (mask_m[55] << 12)
348 | (mask_m[56] << 10) | (mask_m[57] << 8)
349 | (mask_m[58] << 6) | (mask_m[59] << 4)
350 | (mask_m[60] << 2) | (mask_m[61] << 0);
351 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
352 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
353
354 tmp_mask = (mask_m[31] << 28)
355 | (mask_m[32] << 26) | (mask_m[33] << 24)
356 | (mask_m[34] << 22) | (mask_m[35] << 20)
357 | (mask_m[36] << 18) | (mask_m[37] << 16)
358 | (mask_m[48] << 14) | (mask_m[39] << 12)
359 | (mask_m[40] << 10) | (mask_m[41] << 8)
360 | (mask_m[42] << 6) | (mask_m[43] << 4)
361 | (mask_m[44] << 2) | (mask_m[45] << 0);
362 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
363 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
364
365 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
366 | (mask_m[18] << 26) | (mask_m[18] << 24)
367 | (mask_m[20] << 22) | (mask_m[20] << 20)
368 | (mask_m[22] << 18) | (mask_m[22] << 16)
369 | (mask_m[24] << 14) | (mask_m[24] << 12)
370 | (mask_m[25] << 10) | (mask_m[26] << 8)
371 | (mask_m[27] << 6) | (mask_m[28] << 4)
372 | (mask_m[29] << 2) | (mask_m[30] << 0);
373 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
374 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
375
376 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
377 | (mask_m[2] << 26) | (mask_m[3] << 24)
378 | (mask_m[4] << 22) | (mask_m[5] << 20)
379 | (mask_m[6] << 18) | (mask_m[7] << 16)
380 | (mask_m[8] << 14) | (mask_m[9] << 12)
381 | (mask_m[10] << 10) | (mask_m[11] << 8)
382 | (mask_m[12] << 6) | (mask_m[13] << 4)
383 | (mask_m[14] << 2) | (mask_m[15] << 0);
384 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
385 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
386
387 tmp_mask = (mask_p[15] << 28)
388 | (mask_p[14] << 26) | (mask_p[13] << 24)
389 | (mask_p[12] << 22) | (mask_p[11] << 20)
390 | (mask_p[10] << 18) | (mask_p[9] << 16)
391 | (mask_p[8] << 14) | (mask_p[7] << 12)
392 | (mask_p[6] << 10) | (mask_p[5] << 8)
393 | (mask_p[4] << 6) | (mask_p[3] << 4)
394 | (mask_p[2] << 2) | (mask_p[1] << 0);
395 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
396 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
397
398 tmp_mask = (mask_p[30] << 28)
399 | (mask_p[29] << 26) | (mask_p[28] << 24)
400 | (mask_p[27] << 22) | (mask_p[26] << 20)
401 | (mask_p[25] << 18) | (mask_p[24] << 16)
402 | (mask_p[23] << 14) | (mask_p[22] << 12)
403 | (mask_p[21] << 10) | (mask_p[20] << 8)
404 | (mask_p[19] << 6) | (mask_p[18] << 4)
405 | (mask_p[17] << 2) | (mask_p[16] << 0);
406 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
407 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
408
409 tmp_mask = (mask_p[45] << 28)
410 | (mask_p[44] << 26) | (mask_p[43] << 24)
411 | (mask_p[42] << 22) | (mask_p[41] << 20)
412 | (mask_p[40] << 18) | (mask_p[39] << 16)
413 | (mask_p[38] << 14) | (mask_p[37] << 12)
414 | (mask_p[36] << 10) | (mask_p[35] << 8)
415 | (mask_p[34] << 6) | (mask_p[33] << 4)
416 | (mask_p[32] << 2) | (mask_p[31] << 0);
417 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
418 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
419
420 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
421 | (mask_p[59] << 26) | (mask_p[58] << 24)
422 | (mask_p[57] << 22) | (mask_p[56] << 20)
423 | (mask_p[55] << 18) | (mask_p[54] << 16)
424 | (mask_p[53] << 14) | (mask_p[52] << 12)
425 | (mask_p[51] << 10) | (mask_p[50] << 8)
426 | (mask_p[49] << 6) | (mask_p[48] << 4)
427 | (mask_p[47] << 2) | (mask_p[46] << 0);
428 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
429 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
430}
431
432/* All code below is for non single-chip solutions */
433
434/**
435 * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
436 * @rfbuf:
437 * @reg32:
438 * @numBits:
439 * @firstBit:
440 * @column:
441 *
442 * Performs analog "swizzling" of parameters into their location.
443 * Used on external AR2133/AR5133 radios.
444 */
445static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
446 u32 numBits, u32 firstBit,
447 u32 column)
448{
449 u32 tmp32, mask, arrayEntry, lastBit;
450 int32_t bitPosition, bitsLeft;
451
452 tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
453 arrayEntry = (firstBit - 1) / 8;
454 bitPosition = (firstBit - 1) % 8;
455 bitsLeft = numBits;
456 while (bitsLeft > 0) {
457 lastBit = (bitPosition + bitsLeft > 8) ?
458 8 : bitPosition + bitsLeft;
459 mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
460 (column * 8);
461 rfBuf[arrayEntry] &= ~mask;
462 rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
463 (column * 8)) & mask;
464 bitsLeft -= 8 - bitPosition;
465 tmp32 = tmp32 >> (8 - bitPosition);
466 bitPosition = 0;
467 arrayEntry++;
468 }
469}
470
471/*
472 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
473 * rf_pwd_icsyndiv.
474 *
475 * Theoretical Rules:
476 * if 2 GHz band
477 * if forceBiasAuto
478 * if synth_freq < 2412
479 * bias = 0
480 * else if 2412 <= synth_freq <= 2422
481 * bias = 1
482 * else // synth_freq > 2422
483 * bias = 2
484 * else if forceBias > 0
485 * bias = forceBias & 7
486 * else
487 * no change, use value from ini file
488 * else
489 * no change, invalid band
490 *
491 * 1st Mod:
492 * 2422 also uses value of 2
493 * <approved>
494 *
495 * 2nd Mod:
496 * Less than 2412 uses value of 0, 2412 and above uses value of 2
497 */
498static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
499{
500 struct ath_common *common = ath9k_hw_common(ah);
501 u32 tmp_reg;
502 int reg_writes = 0;
503 u32 new_bias = 0;
504
505 if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
506 return;
507 }
508
509 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
510
511 if (synth_freq < 2412)
512 new_bias = 0;
513 else if (synth_freq < 2422)
514 new_bias = 1;
515 else
516 new_bias = 2;
517
518 /* pre-reverse this field */
519 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
520
521 ath_print(common, ATH_DBG_CONFIG,
522 "Force rf_pwd_icsyndiv to %1d on %4d\n",
523 new_bias, synth_freq);
524
525 /* swizzle rf_pwd_icsyndiv */
526 ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
527
528 /* write Bank 6 with new params */
529 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
530}
531
532/**
533 * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
534 * @ah: atheros hardware stucture
535 * @chan:
536 *
537 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
538 * the channel value. Assumes writes enabled to analog bus and bank6 register
539 * cache in ah->analogBank6Data.
540 */
541int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
542{
543 struct ath_common *common = ath9k_hw_common(ah);
544 u32 channelSel = 0;
545 u32 bModeSynth = 0;
546 u32 aModeRefSel = 0;
547 u32 reg32 = 0;
548 u16 freq;
549 struct chan_centers centers;
550
551 ath9k_hw_get_channel_centers(ah, chan, &centers);
552 freq = centers.synth_center;
553
554 if (freq < 4800) {
555 u32 txctl;
556
557 if (((freq - 2192) % 5) == 0) {
558 channelSel = ((freq - 672) * 2 - 3040) / 10;
559 bModeSynth = 0;
560 } else if (((freq - 2224) % 5) == 0) {
561 channelSel = ((freq - 704) * 2 - 3040) / 10;
562 bModeSynth = 1;
563 } else {
564 ath_print(common, ATH_DBG_FATAL,
565 "Invalid channel %u MHz\n", freq);
566 return -EINVAL;
567 }
568
569 channelSel = (channelSel << 2) & 0xff;
570 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
571
572 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
573 if (freq == 2484) {
574
575 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
576 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
577 } else {
578 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
579 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
580 }
581
582 } else if ((freq % 20) == 0 && freq >= 5120) {
583 channelSel =
584 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
585 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
586 } else if ((freq % 10) == 0) {
587 channelSel =
588 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
589 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
590 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
591 else
592 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
593 } else if ((freq % 5) == 0) {
594 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
595 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
596 } else {
597 ath_print(common, ATH_DBG_FATAL,
598 "Invalid channel %u MHz\n", freq);
599 return -EINVAL;
600 }
601
602 ath9k_hw_force_bias(ah, freq);
603
604 reg32 =
605 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
606 (1 << 5) | 0x1;
607
608 REG_WRITE(ah, AR_PHY(0x37), reg32);
609
610 ah->curchan = chan;
611 ah->curchan_rad_index = -1;
612
613 return 0;
614}
615
616/**
617 * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
618 * @ah: atheros hardware structure
619 * @chan:
620 *
621 * For non single-chip solutions. Converts to baseband spur frequency given the
622 * input channel frequency and compute register settings below.
623 */
624void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
625{
626 int bb_spur = AR_NO_SPUR;
627 int bin, cur_bin;
628 int spur_freq_sd;
629 int spur_delta_phase;
630 int denominator;
631 int upper, lower, cur_vit_mask;
632 int tmp, new;
633 int i;
634 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
635 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
636 };
637 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
638 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
639 };
640 int inc[4] = { 0, 100, 0, 0 };
641
642 int8_t mask_m[123];
643 int8_t mask_p[123];
644 int8_t mask_amt;
645 int tmp_mask;
646 int cur_bb_spur;
647 bool is2GHz = IS_CHAN_2GHZ(chan);
648
649 memset(&mask_m, 0, sizeof(int8_t) * 123);
650 memset(&mask_p, 0, sizeof(int8_t) * 123);
651
652 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
653 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
654 if (AR_NO_SPUR == cur_bb_spur)
655 break;
656 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
657 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
658 bb_spur = cur_bb_spur;
659 break;
660 }
661 }
662
663 if (AR_NO_SPUR == bb_spur)
664 return;
665
666 bin = bb_spur * 32;
667
668 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
669 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
670 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
671 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
672 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
673
674 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
675
676 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
677 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
678 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
679 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
680 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
681 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
682
683 spur_delta_phase = ((bb_spur * 524288) / 100) &
684 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
685
686 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
687 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
688
689 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
690 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
691 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
692 REG_WRITE(ah, AR_PHY_TIMING11, new);
693
694 cur_bin = -6000;
695 upper = bin + 100;
696 lower = bin - 100;
697
698 for (i = 0; i < 4; i++) {
699 int pilot_mask = 0;
700 int chan_mask = 0;
701 int bp = 0;
702 for (bp = 0; bp < 30; bp++) {
703 if ((cur_bin > lower) && (cur_bin < upper)) {
704 pilot_mask = pilot_mask | 0x1 << bp;
705 chan_mask = chan_mask | 0x1 << bp;
706 }
707 cur_bin += 100;
708 }
709 cur_bin += inc[i];
710 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
711 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
712 }
713
714 cur_vit_mask = 6100;
715 upper = bin + 120;
716 lower = bin - 120;
717
718 for (i = 0; i < 123; i++) {
719 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
720
721 /* workaround for gcc bug #37014 */
722 volatile int tmp_v = abs(cur_vit_mask - bin);
723
724 if (tmp_v < 75)
725 mask_amt = 1;
726 else
727 mask_amt = 0;
728 if (cur_vit_mask < 0)
729 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
730 else
731 mask_p[cur_vit_mask / 100] = mask_amt;
732 }
733 cur_vit_mask -= 100;
734 }
735
736 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
737 | (mask_m[48] << 26) | (mask_m[49] << 24)
738 | (mask_m[50] << 22) | (mask_m[51] << 20)
739 | (mask_m[52] << 18) | (mask_m[53] << 16)
740 | (mask_m[54] << 14) | (mask_m[55] << 12)
741 | (mask_m[56] << 10) | (mask_m[57] << 8)
742 | (mask_m[58] << 6) | (mask_m[59] << 4)
743 | (mask_m[60] << 2) | (mask_m[61] << 0);
744 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
745 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
746
747 tmp_mask = (mask_m[31] << 28)
748 | (mask_m[32] << 26) | (mask_m[33] << 24)
749 | (mask_m[34] << 22) | (mask_m[35] << 20)
750 | (mask_m[36] << 18) | (mask_m[37] << 16)
751 | (mask_m[48] << 14) | (mask_m[39] << 12)
752 | (mask_m[40] << 10) | (mask_m[41] << 8)
753 | (mask_m[42] << 6) | (mask_m[43] << 4)
754 | (mask_m[44] << 2) | (mask_m[45] << 0);
755 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
756 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
757
758 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
759 | (mask_m[18] << 26) | (mask_m[18] << 24)
760 | (mask_m[20] << 22) | (mask_m[20] << 20)
761 | (mask_m[22] << 18) | (mask_m[22] << 16)
762 | (mask_m[24] << 14) | (mask_m[24] << 12)
763 | (mask_m[25] << 10) | (mask_m[26] << 8)
764 | (mask_m[27] << 6) | (mask_m[28] << 4)
765 | (mask_m[29] << 2) | (mask_m[30] << 0);
766 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
767 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
768
769 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
770 | (mask_m[2] << 26) | (mask_m[3] << 24)
771 | (mask_m[4] << 22) | (mask_m[5] << 20)
772 | (mask_m[6] << 18) | (mask_m[7] << 16)
773 | (mask_m[8] << 14) | (mask_m[9] << 12)
774 | (mask_m[10] << 10) | (mask_m[11] << 8)
775 | (mask_m[12] << 6) | (mask_m[13] << 4)
776 | (mask_m[14] << 2) | (mask_m[15] << 0);
777 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
778 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
779
780 tmp_mask = (mask_p[15] << 28)
781 | (mask_p[14] << 26) | (mask_p[13] << 24)
782 | (mask_p[12] << 22) | (mask_p[11] << 20)
783 | (mask_p[10] << 18) | (mask_p[9] << 16)
784 | (mask_p[8] << 14) | (mask_p[7] << 12)
785 | (mask_p[6] << 10) | (mask_p[5] << 8)
786 | (mask_p[4] << 6) | (mask_p[3] << 4)
787 | (mask_p[2] << 2) | (mask_p[1] << 0);
788 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
789 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
790
791 tmp_mask = (mask_p[30] << 28)
792 | (mask_p[29] << 26) | (mask_p[28] << 24)
793 | (mask_p[27] << 22) | (mask_p[26] << 20)
794 | (mask_p[25] << 18) | (mask_p[24] << 16)
795 | (mask_p[23] << 14) | (mask_p[22] << 12)
796 | (mask_p[21] << 10) | (mask_p[20] << 8)
797 | (mask_p[19] << 6) | (mask_p[18] << 4)
798 | (mask_p[17] << 2) | (mask_p[16] << 0);
799 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
800 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
801
802 tmp_mask = (mask_p[45] << 28)
803 | (mask_p[44] << 26) | (mask_p[43] << 24)
804 | (mask_p[42] << 22) | (mask_p[41] << 20)
805 | (mask_p[40] << 18) | (mask_p[39] << 16)
806 | (mask_p[38] << 14) | (mask_p[37] << 12)
807 | (mask_p[36] << 10) | (mask_p[35] << 8)
808 | (mask_p[34] << 6) | (mask_p[33] << 4)
809 | (mask_p[32] << 2) | (mask_p[31] << 0);
810 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
811 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
812
813 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
814 | (mask_p[59] << 26) | (mask_p[58] << 24)
815 | (mask_p[57] << 22) | (mask_p[56] << 20)
816 | (mask_p[55] << 18) | (mask_p[54] << 16)
817 | (mask_p[53] << 14) | (mask_p[52] << 12)
818 | (mask_p[51] << 10) | (mask_p[50] << 8)
819 | (mask_p[49] << 6) | (mask_p[48] << 4)
820 | (mask_p[47] << 2) | (mask_p[46] << 0);
821 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
822 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
823}
824
825/**
826 * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
827 * @ah: atheros hardware structure
828 *
829 * Only required for older devices with external AR2133/AR5133 radios.
830 */
831int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
832{
833#define ATH_ALLOC_BANK(bank, size) do { \
834 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
835 if (!bank) { \
836 ath_print(common, ATH_DBG_FATAL, \
837 "Cannot allocate RF banks\n"); \
838 return -ENOMEM; \
839 } \
840 } while (0);
841
842 struct ath_common *common = ath9k_hw_common(ah);
843
844 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
845
846 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
847 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
848 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
849 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
850 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
851 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
852 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
853 ATH_ALLOC_BANK(ah->addac5416_21,
854 ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
855 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
856
857 return 0;
858#undef ATH_ALLOC_BANK
859}
860
861
862/**
863 * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
864 * @ah: atheros hardware struture
865 * For the external AR2133/AR5133 radios banks.
866 */
867void
868ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
869{
870#define ATH_FREE_BANK(bank) do { \
871 kfree(bank); \
872 bank = NULL; \
873 } while (0);
874
875 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
876
877 ATH_FREE_BANK(ah->analogBank0Data);
878 ATH_FREE_BANK(ah->analogBank1Data);
879 ATH_FREE_BANK(ah->analogBank2Data);
880 ATH_FREE_BANK(ah->analogBank3Data);
881 ATH_FREE_BANK(ah->analogBank6Data);
882 ATH_FREE_BANK(ah->analogBank6TPCData);
883 ATH_FREE_BANK(ah->analogBank7Data);
884 ATH_FREE_BANK(ah->addac5416_21);
885 ATH_FREE_BANK(ah->bank6Temp);
886
887#undef ATH_FREE_BANK
888}
889
890/* *
891 * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
892 * @ah: atheros hardware structure
893 * @chan:
894 * @modesIndex:
895 *
896 * Used for the external AR2133/AR5133 radios.
897 *
898 * Reads the EEPROM header info from the device structure and programs
899 * all rf registers. This routine requires access to the analog
900 * rf device. This is not required for single-chip devices.
901 */
902bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
903 u16 modesIndex)
904{
905 u32 eepMinorRev;
906 u32 ob5GHz = 0, db5GHz = 0;
907 u32 ob2GHz = 0, db2GHz = 0;
908 int regWrites = 0;
909
910 /*
911 * Software does not need to program bank data
912 * for single chip devices, that is AR9280 or anything
913 * after that.
914 */
915 if (AR_SREV_9280_10_OR_LATER(ah))
916 return true;
917
918 /* Setup rf parameters */
919 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
920
921 /* Setup Bank 0 Write */
922 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
923
924 /* Setup Bank 1 Write */
925 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
926
927 /* Setup Bank 2 Write */
928 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
929
930 /* Setup Bank 6 Write */
931 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
932 modesIndex);
933 {
934 int i;
935 for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
936 ah->analogBank6Data[i] =
937 INI_RA(&ah->iniBank6TPC, i, modesIndex);
938 }
939 }
940
941 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
942 if (eepMinorRev >= 2) {
943 if (IS_CHAN_2GHZ(chan)) {
944 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
945 db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
946 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
947 ob2GHz, 3, 197, 0);
948 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
949 db2GHz, 3, 194, 0);
950 } else {
951 ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
952 db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
953 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
954 ob5GHz, 3, 203, 0);
955 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
956 db5GHz, 3, 200, 0);
957 }
958 }
959
960 /* Setup Bank 7 Setup */
961 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
962
963 /* Write Analog registers */
964 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
965 regWrites);
966 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
967 regWrites);
968 REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
969 regWrites);
970 REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
971 regWrites);
972 REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
973 regWrites);
974 REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
975 regWrites);
976
977 return true;
978}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0132e4c9a9f9..e724c2c1ae2a 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,504 +17,15 @@
17#ifndef PHY_H 17#ifndef PHY_H
18#define PHY_H 18#define PHY_H
19 19
20/* Common between single chip and non single-chip solutions */ 20#define CHANSEL_DIV 15
21void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites); 21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
22 22#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
23/* Single chip radio settings */
24int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
25void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
26
27/* Routines below are for non single-chip solutions */
28int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
29void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
30
31int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
32void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
33
34bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
35 struct ath9k_channel *chan,
36 u16 modesIndex);
37 23
38#define AR_PHY_BASE 0x9800 24#define AR_PHY_BASE 0x9800
39#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 25#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
40 26
41#define AR_PHY_TEST 0x9800
42#define PHY_AGC_CLR 0x10000000
43#define RFSILENT_BB 0x00002000
44
45#define AR_PHY_TURBO 0x9804
46#define AR_PHY_FC_TURBO_MODE 0x00000001
47#define AR_PHY_FC_TURBO_SHORT 0x00000002
48#define AR_PHY_FC_DYN2040_EN 0x00000004
49#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
50#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
51/* For 25 MHz channel spacing -- not used but supported by hw */
52#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
53#define AR_PHY_FC_HT_EN 0x00000040
54#define AR_PHY_FC_SHORT_GI_40 0x00000080
55#define AR_PHY_FC_WALSH 0x00000100
56#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
57#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
58
59#define AR_PHY_TEST2 0x9808
60
61#define AR_PHY_TIMING2 0x9810
62#define AR_PHY_TIMING3 0x9814
63#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
64#define AR_PHY_TIMING3_DSC_MAN_S 17
65#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
66#define AR_PHY_TIMING3_DSC_EXP_S 13
67
68#define AR_PHY_CHIP_ID 0x9818
69#define AR_PHY_CHIP_ID_REV_0 0x80
70#define AR_PHY_CHIP_ID_REV_1 0x81
71#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
72
73#define AR_PHY_ACTIVE 0x981C
74#define AR_PHY_ACTIVE_EN 0x00000001
75#define AR_PHY_ACTIVE_DIS 0x00000000
76
77#define AR_PHY_RF_CTL2 0x9824
78#define AR_PHY_TX_END_DATA_START 0x000000FF
79#define AR_PHY_TX_END_DATA_START_S 0
80#define AR_PHY_TX_END_PA_ON 0x0000FF00
81#define AR_PHY_TX_END_PA_ON_S 8
82
83#define AR_PHY_RF_CTL3 0x9828
84#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
85#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
86
87#define AR_PHY_ADC_CTL 0x982C
88#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
89#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
90#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
91#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
92#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
93#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
94#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
95
96#define AR_PHY_ADC_SERIAL_CTL 0x9830
97#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
98#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
99
100#define AR_PHY_RF_CTL4 0x9834
101#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
102#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
103#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
104#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
105#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
106#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
107#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
108#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
109
110#define AR_PHY_TSTDAC_CONST 0x983c
111
112#define AR_PHY_SETTLING 0x9844
113#define AR_PHY_SETTLING_SWITCH 0x00003F80
114#define AR_PHY_SETTLING_SWITCH_S 7
115
116#define AR_PHY_RXGAIN 0x9848
117#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
118#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
119#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
120#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
121#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
122#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
123#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
124#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
125
126#define AR_PHY_DESIRED_SZ 0x9850
127#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
128#define AR_PHY_DESIRED_SZ_ADC_S 0
129#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
130#define AR_PHY_DESIRED_SZ_PGA_S 8
131#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
132#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
133
134#define AR_PHY_FIND_SIG 0x9858
135#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
136#define AR_PHY_FIND_SIG_FIRSTEP_S 12
137#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
138#define AR_PHY_FIND_SIG_FIRPWR_S 18
139
140#define AR_PHY_AGC_CTL1 0x985C
141#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
142#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
143#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
144#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
145
146#define AR_PHY_AGC_CONTROL 0x9860
147#define AR_PHY_AGC_CONTROL_CAL 0x00000001
148#define AR_PHY_AGC_CONTROL_NF 0x00000002
149#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
150#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
151#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
152
153#define AR_PHY_CCA 0x9864
154#define AR_PHY_MINCCA_PWR 0x0FF80000
155#define AR_PHY_MINCCA_PWR_S 19
156#define AR_PHY_CCA_THRESH62 0x0007F000
157#define AR_PHY_CCA_THRESH62_S 12
158#define AR9280_PHY_MINCCA_PWR 0x1FF00000
159#define AR9280_PHY_MINCCA_PWR_S 20
160#define AR9280_PHY_CCA_THRESH62 0x000FF000
161#define AR9280_PHY_CCA_THRESH62_S 12
162
163#define AR_PHY_SFCORR_LOW 0x986C
164#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
165#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
166#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
167#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
168#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
169#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
170#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
171
172#define AR_PHY_SFCORR 0x9868
173#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
174#define AR_PHY_SFCORR_M2COUNT_THR_S 0
175#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
176#define AR_PHY_SFCORR_M1_THRESH_S 17
177#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
178#define AR_PHY_SFCORR_M2_THRESH_S 24
179
180#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
181#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
182#define AR_PHY_SYNTH_CONTROL 0x9874
183#define AR_PHY_SLEEP_SCAL 0x9878
184
185#define AR_PHY_PLL_CTL 0x987c
186#define AR_PHY_PLL_CTL_40 0xaa
187#define AR_PHY_PLL_CTL_40_5413 0x04
188#define AR_PHY_PLL_CTL_44 0xab
189#define AR_PHY_PLL_CTL_44_2133 0xeb
190#define AR_PHY_PLL_CTL_40_2133 0xea
191
192#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
193#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
194#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
195#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
196#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
197#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
198#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
199#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
200#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
201#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
202#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
203#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
204#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
205#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
206
207#define AR_PHY_RX_DELAY 0x9914
208#define AR_PHY_SEARCH_START_DELAY 0x9918
209#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
210
211#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
212#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
213#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
214#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
215#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
216#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
217#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
218#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
219#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
220
221#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
222#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
223#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
224#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
225
226#define AR_PHY_TIMING5 0x9924
227#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
228#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
229
230#define AR_PHY_POWER_TX_RATE1 0x9934
231#define AR_PHY_POWER_TX_RATE2 0x9938
232#define AR_PHY_POWER_TX_RATE_MAX 0x993c
233#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
234
235#define AR_PHY_FRAME_CTL 0x9944
236#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
237#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
238
239#define AR_PHY_TXPWRADJ 0x994C
240#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
241#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
242#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
243#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
244
245#define AR_PHY_RADAR_EXT 0x9940
246#define AR_PHY_RADAR_EXT_ENA 0x00004000
247
248#define AR_PHY_RADAR_0 0x9954
249#define AR_PHY_RADAR_0_ENA 0x00000001
250#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
251#define AR_PHY_RADAR_0_INBAND 0x0000003e
252#define AR_PHY_RADAR_0_INBAND_S 1
253#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
254#define AR_PHY_RADAR_0_PRSSI_S 6
255#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
256#define AR_PHY_RADAR_0_HEIGHT_S 12
257#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
258#define AR_PHY_RADAR_0_RRSSI_S 18
259#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
260#define AR_PHY_RADAR_0_FIRPWR_S 24
261
262#define AR_PHY_RADAR_1 0x9958
263#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
264#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
265#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
266#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
267#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
268#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
269#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
270#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
271#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
272#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
273#define AR_PHY_RADAR_1_MAXLEN_S 0
274
275#define AR_PHY_SWITCH_CHAIN_0 0x9960
276#define AR_PHY_SWITCH_COM 0x9964
277
278#define AR_PHY_SIGMA_DELTA 0x996C
279#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
280#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
281#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
282#define AR_PHY_SIGMA_DELTA_FILT2_S 3
283#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
284#define AR_PHY_SIGMA_DELTA_FILT1_S 8
285#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
286#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
287
288#define AR_PHY_RESTART 0x9970
289#define AR_PHY_RESTART_DIV_GC 0x001C0000
290#define AR_PHY_RESTART_DIV_GC_S 18
291
292#define AR_PHY_RFBUS_REQ 0x997C
293#define AR_PHY_RFBUS_REQ_EN 0x00000001
294
295#define AR_PHY_TIMING7 0x9980
296#define AR_PHY_TIMING8 0x9984
297#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
298#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
299
300#define AR_PHY_BIN_MASK2_1 0x9988
301#define AR_PHY_BIN_MASK2_2 0x998c
302#define AR_PHY_BIN_MASK2_3 0x9990
303#define AR_PHY_BIN_MASK2_4 0x9994
304
305#define AR_PHY_BIN_MASK_1 0x9900
306#define AR_PHY_BIN_MASK_2 0x9904
307#define AR_PHY_BIN_MASK_3 0x9908
308
309#define AR_PHY_MASK_CTL 0x990c
310
311#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
312#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
313
314#define AR_PHY_TIMING9 0x9998
315#define AR_PHY_TIMING10 0x999c
316#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
317#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
318
319#define AR_PHY_TIMING11 0x99a0
320#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
321#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
322#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
323#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
324#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
325#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
326
327#define AR_PHY_RX_CHAINMASK 0x99a4
328#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
329#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
330#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
331
332#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
333#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
334#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
335#define AR_PHY_9285_ANT_DIV_CTL_S 24
336#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
337#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
338#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
339#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
340#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
341#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
342#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
343#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
344#define AR_PHY_9285_ANT_DIV_LNA1 2
345#define AR_PHY_9285_ANT_DIV_LNA2 1
346#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
347#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
348#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
349#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
350
351#define AR_PHY_EXT_CCA0 0x99b8
352#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
353#define AR_PHY_EXT_CCA0_THRESH62_S 0
354
355#define AR_PHY_EXT_CCA 0x99bc
356#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
357#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
358#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
359#define AR_PHY_EXT_CCA_THRESH62_S 16
360#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
361#define AR_PHY_EXT_MINCCA_PWR_S 23
362#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
363#define AR9280_PHY_EXT_MINCCA_PWR_S 16
364
365#define AR_PHY_SFCORR_EXT 0x99c0
366#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
367#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
368#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
369#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
370#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
371#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
372#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
373#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
374#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
375
376#define AR_PHY_HALFGI 0x99D0
377#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
378#define AR_PHY_HALFGI_DSC_MAN_S 4
379#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
380#define AR_PHY_HALFGI_DSC_EXP_S 0
381
382#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
383#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
384
385#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
386
387#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
388#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
389
390#define AR_PHY_M_SLEEP 0x99f0
391#define AR_PHY_REFCLKDLY 0x99f4
392#define AR_PHY_REFCLKPD 0x99f8
393
394#define AR_PHY_CALMODE 0x99f0
395
396#define AR_PHY_CALMODE_IQ 0x00000000
397#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
398#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
399#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
400
401#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
402#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
403#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
404#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
405
406#define AR_PHY_CURRENT_RSSI 0x9c1c
407#define AR9280_PHY_CURRENT_RSSI 0x9c3c
408
409#define AR_PHY_RFBUS_GRANT 0x9C20
410#define AR_PHY_RFBUS_GRANT_EN 0x00000001
411
412#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
413#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
414
415#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
416
417#define AR_PHY_MODE 0xA200
418#define AR_PHY_MODE_ASYNCFIFO 0x80
419#define AR_PHY_MODE_AR2133 0x08
420#define AR_PHY_MODE_AR5111 0x00
421#define AR_PHY_MODE_AR5112 0x08
422#define AR_PHY_MODE_DYNAMIC 0x04
423#define AR_PHY_MODE_RF2GHZ 0x02
424#define AR_PHY_MODE_RF5GHZ 0x00
425#define AR_PHY_MODE_CCK 0x01
426#define AR_PHY_MODE_OFDM 0x00
427#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
428
429#define AR_PHY_CCK_TX_CTRL 0xA204
430#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
431#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
432#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
433
434#define AR_PHY_CCK_DETECT 0xA208
435#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
436#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
437/* [12:6] settling time for antenna switch */
438#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
439#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
440#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
441#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
442
443#define AR_PHY_GAIN_2GHZ 0xA20C
444#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
445#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
446#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
447#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
448#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
449#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
450
451#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
452#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
453#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
454#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
455#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
456#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
457#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
458#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
459
460#define AR_PHY_CCK_RXCTRL4 0xA21C
461#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
462#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
463
464#define AR_PHY_DAG_CTRLCCK 0xA228
465#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
466#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
467#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
468
469#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
470#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
471
472#define AR_PHY_POWER_TX_RATE3 0xA234
473#define AR_PHY_POWER_TX_RATE4 0xA238
474
475#define AR_PHY_SCRM_SEQ_XR 0xA23C
476#define AR_PHY_HEADER_DETECT_XR 0xA240
477#define AR_PHY_CHIRP_DETECTED_XR 0xA244
478#define AR_PHY_BLUETOOTH 0xA254
479
480#define AR_PHY_TPCRG1 0xA258
481#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
482#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
483
484#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
485#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
486#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
487#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
488#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
489#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
490
491#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
492#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
493
494#define AR_PHY_TX_PWRCTRL4 0xa264
495#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
496#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
497#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
498#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
499
500#define AR_PHY_TX_PWRCTRL6_0 0xa270
501#define AR_PHY_TX_PWRCTRL6_1 0xb270
502#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
503#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
504
505#define AR_PHY_TX_PWRCTRL7 0xa274
506#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000 27#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
507#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13 28#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
508#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
509#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
510
511#define AR_PHY_TX_PWRCTRL9 0xa27C
512#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
513#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
514#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
515#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
516
517#define AR_PHY_TX_GAIN_TBL1 0xa300
518#define AR_PHY_TX_GAIN_CLC 0x0000001E 29#define AR_PHY_TX_GAIN_CLC 0x0000001E
519#define AR_PHY_TX_GAIN_CLC_S 1 30#define AR_PHY_TX_GAIN_CLC_S 1
520#define AR_PHY_TX_GAIN 0x0007F000 31#define AR_PHY_TX_GAIN 0x0007F000
@@ -526,91 +37,6 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
526#define AR_PHY_CLC_Q0 0x0000ffd0 37#define AR_PHY_CLC_Q0 0x0000ffd0
527#define AR_PHY_CLC_Q0_S 5 38#define AR_PHY_CLC_Q0_S 5
528 39
529#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
530#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
531#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
532#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
533
534#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
535#define AR_PHY_MASK2_M_31_45 0xa3a4
536#define AR_PHY_MASK2_M_16_30 0xa3a8
537#define AR_PHY_MASK2_M_00_15 0xa3ac
538#define AR_PHY_MASK2_P_15_01 0xa3b8
539#define AR_PHY_MASK2_P_30_16 0xa3bc
540#define AR_PHY_MASK2_P_45_31 0xa3c0
541#define AR_PHY_MASK2_P_61_45 0xa3c4
542#define AR_PHY_SPUR_REG 0x994c
543
544#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
545#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
546
547#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
548#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
549#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
550#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
551#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
552#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
553
554#define AR_PHY_PILOT_MASK_01_30 0xa3b0
555#define AR_PHY_PILOT_MASK_31_60 0xa3b4
556
557#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
558#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
559
560#define AR_PHY_ANALOG_SWAP 0xa268
561#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
562
563#define AR_PHY_TPCRG5 0xA26C
564#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
565#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
566#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
567#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
568#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
569#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
570#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
571#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
572#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
573#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
574
575/* Carrier leak calibration control, do it after AGC calibration */
576#define AR_PHY_CL_CAL_CTL 0xA358
577#define AR_PHY_CL_CAL_ENABLE 0x00000002
578#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
579
580#define AR_PHY_POWER_TX_RATE5 0xA38C
581#define AR_PHY_POWER_TX_RATE6 0xA390
582
583#define AR_PHY_CAL_CHAINMASK 0xA39C
584
585#define AR_PHY_POWER_TX_SUB 0xA3C8
586#define AR_PHY_POWER_TX_RATE7 0xA3CC
587#define AR_PHY_POWER_TX_RATE8 0xA3D0
588#define AR_PHY_POWER_TX_RATE9 0xA3D4
589
590#define AR_PHY_XPA_CFG 0xA3D8
591#define AR_PHY_FORCE_XPA_CFG 0x000000001
592#define AR_PHY_FORCE_XPA_CFG_S 0
593
594#define AR_PHY_CH1_CCA 0xa864
595#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
596#define AR_PHY_CH1_MINCCA_PWR_S 19
597#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
598#define AR9280_PHY_CH1_MINCCA_PWR_S 20
599
600#define AR_PHY_CH2_CCA 0xb864
601#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
602#define AR_PHY_CH2_MINCCA_PWR_S 19
603
604#define AR_PHY_CH1_EXT_CCA 0xa9bc
605#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
606#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
607#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
608#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
609
610#define AR_PHY_CH2_EXT_CCA 0xb9bc
611#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
612#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
613
614#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \ 40#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
615 int r; \ 41 int r; \
616 for (r = 0; r < ((iniarray)->ia_rows); r++) { \ 42 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
@@ -625,6 +51,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
625#define ANTSWAP_AB 0x0001 51#define ANTSWAP_AB 0x0001
626#define REDUCE_CHAIN_0 0x00000050 52#define REDUCE_CHAIN_0 0x00000050
627#define REDUCE_CHAIN_1 0x00000051 53#define REDUCE_CHAIN_1 0x00000051
54#define AR_PHY_CHIP_ID 0x9818
628 55
629#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \ 56#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
630 int i; \ 57 int i; \
@@ -632,4 +59,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
632 (_bank)[i] = INI_RA((_iniarray), i, _col);; \ 59 (_bank)[i] = INI_RA((_iniarray), i, _col);; \
633 } while (0) 60 } while (0)
634 61
62#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
63#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
64
635#endif 65#endif
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index ee81291f2fba..8519452c95f1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -691,6 +691,19 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
691 rate_table = sc->cur_rate_table; 691 rate_table = sc->cur_rate_table;
692 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe); 692 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
693 693
694 /*
695 * If we're in HT mode and both us and our peer supports LDPC.
696 * We don't need to check our own device's capabilities as our own
697 * ht capabilities would have already been intersected with our peer's.
698 */
699 if (conf_is_ht(&sc->hw->conf) &&
700 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
701 tx_info->flags |= IEEE80211_TX_CTL_LDPC;
702
703 if (conf_is_ht(&sc->hw->conf) &&
704 (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
705 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
706
694 if (is_probe) { 707 if (is_probe) {
695 /* set one try for probe rates. For the 708 /* set one try for probe rates. For the
696 * probes don't enable rts */ 709 * probes don't enable rts */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 94560e2fe376..ac60c4ee62d3 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18#include "ar9003_mac.h"
19
20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
18 21
19static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 22static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
20 struct ieee80211_hdr *hdr) 23 struct ieee80211_hdr *hdr)
@@ -115,56 +118,246 @@ static void ath_opmode_init(struct ath_softc *sc)
115 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 118 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
116} 119}
117 120
118int ath_rx_init(struct ath_softc *sc, int nbufs) 121static bool ath_rx_edma_buf_link(struct ath_softc *sc,
122 enum ath9k_rx_qtype qtype)
119{ 123{
120 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 124 struct ath_hw *ah = sc->sc_ah;
125 struct ath_rx_edma *rx_edma;
121 struct sk_buff *skb; 126 struct sk_buff *skb;
122 struct ath_buf *bf; 127 struct ath_buf *bf;
123 int error = 0;
124 128
125 spin_lock_init(&sc->rx.rxflushlock); 129 rx_edma = &sc->rx.rx_edma[qtype];
126 sc->sc_flags &= ~SC_OP_RXFLUSH; 130 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
127 spin_lock_init(&sc->rx.rxbuflock); 131 return false;
128 132
129 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 133 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
130 min(common->cachelsz, (u16)64)); 134 list_del_init(&bf->list);
131 135
132 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 136 skb = bf->bf_mpdu;
133 common->cachelsz, common->rx_bufsize); 137
138 ATH_RXBUF_RESET(bf);
139 memset(skb->data, 0, ah->caps.rx_status_len);
140 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
141 ah->caps.rx_status_len, DMA_TO_DEVICE);
134 142
135 /* Initialize rx descriptors */ 143 SKB_CB_ATHBUF(skb) = bf;
144 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
145 skb_queue_tail(&rx_edma->rx_fifo, skb);
136 146
137 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 147 return true;
138 "rx", nbufs, 1); 148}
139 if (error != 0) { 149
140 ath_print(common, ATH_DBG_FATAL, 150static void ath_rx_addbuffer_edma(struct ath_softc *sc,
141 "failed to allocate rx descriptors: %d\n", error); 151 enum ath9k_rx_qtype qtype, int size)
142 goto err; 152{
153 struct ath_rx_edma *rx_edma;
154 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
155 u32 nbuf = 0;
156
157 rx_edma = &sc->rx.rx_edma[qtype];
158 if (list_empty(&sc->rx.rxbuf)) {
159 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
160 return;
143 } 161 }
144 162
163 while (!list_empty(&sc->rx.rxbuf)) {
164 nbuf++;
165
166 if (!ath_rx_edma_buf_link(sc, qtype))
167 break;
168
169 if (nbuf >= size)
170 break;
171 }
172}
173
174static void ath_rx_remove_buffer(struct ath_softc *sc,
175 enum ath9k_rx_qtype qtype)
176{
177 struct ath_buf *bf;
178 struct ath_rx_edma *rx_edma;
179 struct sk_buff *skb;
180
181 rx_edma = &sc->rx.rx_edma[qtype];
182
183 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
184 bf = SKB_CB_ATHBUF(skb);
185 BUG_ON(!bf);
186 list_add_tail(&bf->list, &sc->rx.rxbuf);
187 }
188}
189
190static void ath_rx_edma_cleanup(struct ath_softc *sc)
191{
192 struct ath_buf *bf;
193
194 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
195 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
196
145 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 197 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
198 if (bf->bf_mpdu)
199 dev_kfree_skb_any(bf->bf_mpdu);
200 }
201
202 INIT_LIST_HEAD(&sc->rx.rxbuf);
203
204 kfree(sc->rx.rx_bufptr);
205 sc->rx.rx_bufptr = NULL;
206}
207
208static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
209{
210 skb_queue_head_init(&rx_edma->rx_fifo);
211 skb_queue_head_init(&rx_edma->rx_buffers);
212 rx_edma->rx_fifo_hwsize = size;
213}
214
215static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
216{
217 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
218 struct ath_hw *ah = sc->sc_ah;
219 struct sk_buff *skb;
220 struct ath_buf *bf;
221 int error = 0, i;
222 u32 size;
223
224
225 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
226 ah->caps.rx_status_len,
227 min(common->cachelsz, (u16)64));
228
229 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
230 ah->caps.rx_status_len);
231
232 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
233 ah->caps.rx_lp_qdepth);
234 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
235 ah->caps.rx_hp_qdepth);
236
237 size = sizeof(struct ath_buf) * nbufs;
238 bf = kzalloc(size, GFP_KERNEL);
239 if (!bf)
240 return -ENOMEM;
241
242 INIT_LIST_HEAD(&sc->rx.rxbuf);
243 sc->rx.rx_bufptr = bf;
244
245 for (i = 0; i < nbufs; i++, bf++) {
146 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 246 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
147 if (skb == NULL) { 247 if (!skb) {
148 error = -ENOMEM; 248 error = -ENOMEM;
149 goto err; 249 goto rx_init_fail;
150 } 250 }
151 251
252 memset(skb->data, 0, common->rx_bufsize);
152 bf->bf_mpdu = skb; 253 bf->bf_mpdu = skb;
254
153 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 255 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
154 common->rx_bufsize, 256 common->rx_bufsize,
155 DMA_FROM_DEVICE); 257 DMA_BIDIRECTIONAL);
156 if (unlikely(dma_mapping_error(sc->dev, 258 if (unlikely(dma_mapping_error(sc->dev,
157 bf->bf_buf_addr))) { 259 bf->bf_buf_addr))) {
158 dev_kfree_skb_any(skb); 260 dev_kfree_skb_any(skb);
159 bf->bf_mpdu = NULL; 261 bf->bf_mpdu = NULL;
262 ath_print(common, ATH_DBG_FATAL,
263 "dma_mapping_error() on RX init\n");
264 error = -ENOMEM;
265 goto rx_init_fail;
266 }
267
268 list_add_tail(&bf->list, &sc->rx.rxbuf);
269 }
270
271 return 0;
272
273rx_init_fail:
274 ath_rx_edma_cleanup(sc);
275 return error;
276}
277
278static void ath_edma_start_recv(struct ath_softc *sc)
279{
280 spin_lock_bh(&sc->rx.rxbuflock);
281
282 ath9k_hw_rxena(sc->sc_ah);
283
284 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
285 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
286
287 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
288 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
289
290 spin_unlock_bh(&sc->rx.rxbuflock);
291
292 ath_opmode_init(sc);
293
294 ath9k_hw_startpcureceive(sc->sc_ah);
295}
296
297static void ath_edma_stop_recv(struct ath_softc *sc)
298{
299 spin_lock_bh(&sc->rx.rxbuflock);
300 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
301 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
302 spin_unlock_bh(&sc->rx.rxbuflock);
303}
304
305int ath_rx_init(struct ath_softc *sc, int nbufs)
306{
307 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
308 struct sk_buff *skb;
309 struct ath_buf *bf;
310 int error = 0;
311
312 spin_lock_init(&sc->rx.rxflushlock);
313 sc->sc_flags &= ~SC_OP_RXFLUSH;
314 spin_lock_init(&sc->rx.rxbuflock);
315
316 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
317 return ath_rx_edma_init(sc, nbufs);
318 } else {
319 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
320 min(common->cachelsz, (u16)64));
321
322 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
323 common->cachelsz, common->rx_bufsize);
324
325 /* Initialize rx descriptors */
326
327 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
328 "rx", nbufs, 1, 0);
329 if (error != 0) {
160 ath_print(common, ATH_DBG_FATAL, 330 ath_print(common, ATH_DBG_FATAL,
161 "dma_mapping_error() on RX init\n"); 331 "failed to allocate rx descriptors: %d\n",
162 error = -ENOMEM; 332 error);
163 goto err; 333 goto err;
164 } 334 }
165 bf->bf_dmacontext = bf->bf_buf_addr; 335
336 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
337 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
338 GFP_KERNEL);
339 if (skb == NULL) {
340 error = -ENOMEM;
341 goto err;
342 }
343
344 bf->bf_mpdu = skb;
345 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
346 common->rx_bufsize,
347 DMA_FROM_DEVICE);
348 if (unlikely(dma_mapping_error(sc->dev,
349 bf->bf_buf_addr))) {
350 dev_kfree_skb_any(skb);
351 bf->bf_mpdu = NULL;
352 ath_print(common, ATH_DBG_FATAL,
353 "dma_mapping_error() on RX init\n");
354 error = -ENOMEM;
355 goto err;
356 }
357 bf->bf_dmacontext = bf->bf_buf_addr;
358 }
359 sc->rx.rxlink = NULL;
166 } 360 }
167 sc->rx.rxlink = NULL;
168 361
169err: 362err:
170 if (error) 363 if (error)
@@ -180,17 +373,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
180 struct sk_buff *skb; 373 struct sk_buff *skb;
181 struct ath_buf *bf; 374 struct ath_buf *bf;
182 375
183 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 376 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
184 skb = bf->bf_mpdu; 377 ath_rx_edma_cleanup(sc);
185 if (skb) { 378 return;
186 dma_unmap_single(sc->dev, bf->bf_buf_addr, 379 } else {
187 common->rx_bufsize, DMA_FROM_DEVICE); 380 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
188 dev_kfree_skb(skb); 381 skb = bf->bf_mpdu;
382 if (skb) {
383 dma_unmap_single(sc->dev, bf->bf_buf_addr,
384 common->rx_bufsize,
385 DMA_FROM_DEVICE);
386 dev_kfree_skb(skb);
387 }
189 } 388 }
190 }
191 389
192 if (sc->rx.rxdma.dd_desc_len != 0) 390 if (sc->rx.rxdma.dd_desc_len != 0)
193 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 391 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
392 }
194} 393}
195 394
196/* 395/*
@@ -273,6 +472,11 @@ int ath_startrecv(struct ath_softc *sc)
273 struct ath_hw *ah = sc->sc_ah; 472 struct ath_hw *ah = sc->sc_ah;
274 struct ath_buf *bf, *tbf; 473 struct ath_buf *bf, *tbf;
275 474
475 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
476 ath_edma_start_recv(sc);
477 return 0;
478 }
479
276 spin_lock_bh(&sc->rx.rxbuflock); 480 spin_lock_bh(&sc->rx.rxbuflock);
277 if (list_empty(&sc->rx.rxbuf)) 481 if (list_empty(&sc->rx.rxbuf))
278 goto start_recv; 482 goto start_recv;
@@ -306,7 +510,11 @@ bool ath_stoprecv(struct ath_softc *sc)
306 ath9k_hw_stoppcurecv(ah); 510 ath9k_hw_stoppcurecv(ah);
307 ath9k_hw_setrxfilter(ah, 0); 511 ath9k_hw_setrxfilter(ah, 0);
308 stopped = ath9k_hw_stopdmarecv(ah); 512 stopped = ath9k_hw_stopdmarecv(ah);
309 sc->rx.rxlink = NULL; 513
514 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
515 ath_edma_stop_recv(sc);
516 else
517 sc->rx.rxlink = NULL;
310 518
311 return stopped; 519 return stopped;
312} 520}
@@ -315,7 +523,9 @@ void ath_flushrecv(struct ath_softc *sc)
315{ 523{
316 spin_lock_bh(&sc->rx.rxflushlock); 524 spin_lock_bh(&sc->rx.rxflushlock);
317 sc->sc_flags |= SC_OP_RXFLUSH; 525 sc->sc_flags |= SC_OP_RXFLUSH;
318 ath_rx_tasklet(sc, 1); 526 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
527 ath_rx_tasklet(sc, 1, true);
528 ath_rx_tasklet(sc, 1, false);
319 sc->sc_flags &= ~SC_OP_RXFLUSH; 529 sc->sc_flags &= ~SC_OP_RXFLUSH;
320 spin_unlock_bh(&sc->rx.rxflushlock); 530 spin_unlock_bh(&sc->rx.rxflushlock);
321} 531}
@@ -469,14 +679,147 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
469 ieee80211_rx(hw, skb); 679 ieee80211_rx(hw, skb);
470} 680}
471 681
472int ath_rx_tasklet(struct ath_softc *sc, int flush) 682static bool ath_edma_get_buffers(struct ath_softc *sc,
683 enum ath9k_rx_qtype qtype)
473{ 684{
474#define PA2DESC(_sc, _pa) \ 685 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
475 ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ 686 struct ath_hw *ah = sc->sc_ah;
476 ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) 687 struct ath_common *common = ath9k_hw_common(ah);
688 struct sk_buff *skb;
689 struct ath_buf *bf;
690 int ret;
691
692 skb = skb_peek(&rx_edma->rx_fifo);
693 if (!skb)
694 return false;
695
696 bf = SKB_CB_ATHBUF(skb);
697 BUG_ON(!bf);
698
699 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
700 common->rx_bufsize, DMA_FROM_DEVICE);
701
702 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
703 if (ret == -EINPROGRESS)
704 return false;
705
706 __skb_unlink(skb, &rx_edma->rx_fifo);
707 if (ret == -EINVAL) {
708 /* corrupt descriptor, skip this one and the following one */
709 list_add_tail(&bf->list, &sc->rx.rxbuf);
710 ath_rx_edma_buf_link(sc, qtype);
711 skb = skb_peek(&rx_edma->rx_fifo);
712 if (!skb)
713 return true;
714
715 bf = SKB_CB_ATHBUF(skb);
716 BUG_ON(!bf);
717
718 __skb_unlink(skb, &rx_edma->rx_fifo);
719 list_add_tail(&bf->list, &sc->rx.rxbuf);
720 ath_rx_edma_buf_link(sc, qtype);
721 }
722 skb_queue_tail(&rx_edma->rx_buffers, skb);
723
724 return true;
725}
477 726
727static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
728 struct ath_rx_status *rs,
729 enum ath9k_rx_qtype qtype)
730{
731 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
732 struct sk_buff *skb;
478 struct ath_buf *bf; 733 struct ath_buf *bf;
734
735 while (ath_edma_get_buffers(sc, qtype));
736 skb = __skb_dequeue(&rx_edma->rx_buffers);
737 if (!skb)
738 return NULL;
739
740 bf = SKB_CB_ATHBUF(skb);
741 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
742 return bf;
743}
744
745static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
746 struct ath_rx_status *rs)
747{
748 struct ath_hw *ah = sc->sc_ah;
749 struct ath_common *common = ath9k_hw_common(ah);
479 struct ath_desc *ds; 750 struct ath_desc *ds;
751 struct ath_buf *bf;
752 int ret;
753
754 if (list_empty(&sc->rx.rxbuf)) {
755 sc->rx.rxlink = NULL;
756 return NULL;
757 }
758
759 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
760 ds = bf->bf_desc;
761
762 /*
763 * Must provide the virtual address of the current
764 * descriptor, the physical address, and the virtual
765 * address of the next descriptor in the h/w chain.
766 * This allows the HAL to look ahead to see if the
767 * hardware is done with a descriptor by checking the
768 * done bit in the following descriptor and the address
769 * of the current descriptor the DMA engine is working
770 * on. All this is necessary because of our use of
771 * a self-linked list to avoid rx overruns.
772 */
773 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
774 if (ret == -EINPROGRESS) {
775 struct ath_rx_status trs;
776 struct ath_buf *tbf;
777 struct ath_desc *tds;
778
779 memset(&trs, 0, sizeof(trs));
780 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
781 sc->rx.rxlink = NULL;
782 return NULL;
783 }
784
785 tbf = list_entry(bf->list.next, struct ath_buf, list);
786
787 /*
788 * On some hardware the descriptor status words could
789 * get corrupted, including the done bit. Because of
790 * this, check if the next descriptor's done bit is
791 * set or not.
792 *
793 * If the next descriptor's done bit is set, the current
794 * descriptor has been corrupted. Force s/w to discard
795 * this descriptor and continue...
796 */
797
798 tds = tbf->bf_desc;
799 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
800 if (ret == -EINPROGRESS)
801 return NULL;
802 }
803
804 if (!bf->bf_mpdu)
805 return bf;
806
807 /*
808 * Synchronize the DMA transfer with CPU before
809 * 1. accessing the frame
810 * 2. requeueing the same buffer to h/w
811 */
812 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
813 common->rx_bufsize,
814 DMA_FROM_DEVICE);
815
816 return bf;
817}
818
819
820int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
821{
822 struct ath_buf *bf;
480 struct sk_buff *skb = NULL, *requeue_skb; 823 struct sk_buff *skb = NULL, *requeue_skb;
481 struct ieee80211_rx_status *rxs; 824 struct ieee80211_rx_status *rxs;
482 struct ath_hw *ah = sc->sc_ah; 825 struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +834,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
491 int retval; 834 int retval;
492 bool decrypt_error = false; 835 bool decrypt_error = false;
493 struct ath_rx_status rs; 836 struct ath_rx_status rs;
837 enum ath9k_rx_qtype qtype;
838 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
839 int dma_type;
494 840
841 if (edma)
842 dma_type = DMA_FROM_DEVICE;
843 else
844 dma_type = DMA_BIDIRECTIONAL;
845
846 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
495 spin_lock_bh(&sc->rx.rxbuflock); 847 spin_lock_bh(&sc->rx.rxbuflock);
496 848
497 do { 849 do {
@@ -499,71 +851,19 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
499 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 851 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
500 break; 852 break;
501 853
502 if (list_empty(&sc->rx.rxbuf)) {
503 sc->rx.rxlink = NULL;
504 break;
505 }
506
507 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
508 ds = bf->bf_desc;
509
510 /*
511 * Must provide the virtual address of the current
512 * descriptor, the physical address, and the virtual
513 * address of the next descriptor in the h/w chain.
514 * This allows the HAL to look ahead to see if the
515 * hardware is done with a descriptor by checking the
516 * done bit in the following descriptor and the address
517 * of the current descriptor the DMA engine is working
518 * on. All this is necessary because of our use of
519 * a self-linked list to avoid rx overruns.
520 */
521 memset(&rs, 0, sizeof(rs)); 854 memset(&rs, 0, sizeof(rs));
522 retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0); 855 if (edma)
523 if (retval == -EINPROGRESS) { 856 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
524 struct ath_rx_status trs; 857 else
525 struct ath_buf *tbf; 858 bf = ath_get_next_rx_buf(sc, &rs);
526 struct ath_desc *tds;
527
528 memset(&trs, 0, sizeof(trs));
529 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
530 sc->rx.rxlink = NULL;
531 break;
532 }
533 859
534 tbf = list_entry(bf->list.next, struct ath_buf, list); 860 if (!bf)
535 861 break;
536 /*
537 * On some hardware the descriptor status words could
538 * get corrupted, including the done bit. Because of
539 * this, check if the next descriptor's done bit is
540 * set or not.
541 *
542 * If the next descriptor's done bit is set, the current
543 * descriptor has been corrupted. Force s/w to discard
544 * this descriptor and continue...
545 */
546
547 tds = tbf->bf_desc;
548 retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
549 if (retval == -EINPROGRESS) {
550 break;
551 }
552 }
553 862
554 skb = bf->bf_mpdu; 863 skb = bf->bf_mpdu;
555 if (!skb) 864 if (!skb)
556 continue; 865 continue;
557 866
558 /*
559 * Synchronize the DMA transfer with CPU before
560 * 1. accessing the frame
561 * 2. requeueing the same buffer to h/w
562 */
563 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
564 common->rx_bufsize,
565 DMA_FROM_DEVICE);
566
567 hdr = (struct ieee80211_hdr *) skb->data; 867 hdr = (struct ieee80211_hdr *) skb->data;
568 rxs = IEEE80211_SKB_RXCB(skb); 868 rxs = IEEE80211_SKB_RXCB(skb);
569 869
@@ -597,9 +897,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
597 /* Unmap the frame */ 897 /* Unmap the frame */
598 dma_unmap_single(sc->dev, bf->bf_buf_addr, 898 dma_unmap_single(sc->dev, bf->bf_buf_addr,
599 common->rx_bufsize, 899 common->rx_bufsize,
600 DMA_FROM_DEVICE); 900 dma_type);
601 901
602 skb_put(skb, rs.rs_datalen); 902 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
903 if (ah->caps.rx_status_len)
904 skb_pull(skb, ah->caps.rx_status_len);
603 905
604 ath9k_cmn_rx_skb_postprocess(common, skb, &rs, 906 ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
605 rxs, decrypt_error); 907 rxs, decrypt_error);
@@ -608,7 +910,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
608 bf->bf_mpdu = requeue_skb; 910 bf->bf_mpdu = requeue_skb;
609 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 911 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
610 common->rx_bufsize, 912 common->rx_bufsize,
611 DMA_FROM_DEVICE); 913 dma_type);
612 if (unlikely(dma_mapping_error(sc->dev, 914 if (unlikely(dma_mapping_error(sc->dev,
613 bf->bf_buf_addr))) { 915 bf->bf_buf_addr))) {
614 dev_kfree_skb_any(requeue_skb); 916 dev_kfree_skb_any(requeue_skb);
@@ -639,12 +941,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
639 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 941 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
640 942
641requeue: 943requeue:
642 list_move_tail(&bf->list, &sc->rx.rxbuf); 944 if (edma) {
643 ath_rx_buf_link(sc, bf); 945 list_add_tail(&bf->list, &sc->rx.rxbuf);
946 ath_rx_edma_buf_link(sc, qtype);
947 } else {
948 list_move_tail(&bf->list, &sc->rx.rxbuf);
949 ath_rx_buf_link(sc, bf);
950 }
644 } while (1); 951 } while (1);
645 952
646 spin_unlock_bh(&sc->rx.rxbuflock); 953 spin_unlock_bh(&sc->rx.rxbuflock);
647 954
648 return 0; 955 return 0;
649#undef PA2DESC
650} 956}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 7e36ad7421b7..d4371a43bdaa 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
20#include "../reg.h" 20#include "../reg.h"
21 21
22#define AR_CR 0x0008 22#define AR_CR 0x0008
23#define AR_CR_RXE 0x00000004 23#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
24#define AR_CR_RXD 0x00000020 24#define AR_CR_RXD 0x00000020
25#define AR_CR_SWI 0x00000040 25#define AR_CR_SWI 0x00000040
26 26
@@ -39,6 +39,12 @@
39#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 39#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
40#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 40#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
41 41
42#define AR_RXBP_THRESH 0x0018
43#define AR_RXBP_THRESH_HP 0x0000000f
44#define AR_RXBP_THRESH_HP_S 0
45#define AR_RXBP_THRESH_LP 0x00003f00
46#define AR_RXBP_THRESH_LP_S 8
47
42#define AR_MIRT 0x0020 48#define AR_MIRT 0x0020
43#define AR_MIRT_VAL 0x0000ffff 49#define AR_MIRT_VAL 0x0000ffff
44#define AR_MIRT_VAL_S 16 50#define AR_MIRT_VAL_S 16
@@ -144,6 +150,9 @@
144#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15 150#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
145#define AR_MACMISC_MISC_OBS_BUS_1 1 151#define AR_MACMISC_MISC_OBS_BUS_1 1
146 152
153#define AR_DATABUF_SIZE 0x0060
154#define AR_DATABUF_SIZE_MASK 0x00000FFF
155
147#define AR_GTXTO 0x0064 156#define AR_GTXTO 0x0064
148#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF 157#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
149#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000 158#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
@@ -160,9 +169,14 @@
160#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000 169#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
161#define AR_CST_TIMEOUT_LIMIT_S 16 170#define AR_CST_TIMEOUT_LIMIT_S 16
162 171
172#define AR_HP_RXDP 0x0074
173#define AR_LP_RXDP 0x0078
174
163#define AR_ISR 0x0080 175#define AR_ISR 0x0080
164#define AR_ISR_RXOK 0x00000001 176#define AR_ISR_RXOK 0x00000001
165#define AR_ISR_RXDESC 0x00000002 177#define AR_ISR_RXDESC 0x00000002
178#define AR_ISR_HP_RXOK 0x00000001
179#define AR_ISR_LP_RXOK 0x00000002
166#define AR_ISR_RXERR 0x00000004 180#define AR_ISR_RXERR 0x00000004
167#define AR_ISR_RXNOPKT 0x00000008 181#define AR_ISR_RXNOPKT 0x00000008
168#define AR_ISR_RXEOL 0x00000010 182#define AR_ISR_RXEOL 0x00000010
@@ -232,7 +246,6 @@
232#define AR_ISR_S5_TIMER_THRESH 0x0007FE00 246#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
233#define AR_ISR_S5_TIM_TIMER 0x00000010 247#define AR_ISR_S5_TIM_TIMER 0x00000010
234#define AR_ISR_S5_DTIM_TIMER 0x00000020 248#define AR_ISR_S5_DTIM_TIMER 0x00000020
235#define AR_ISR_S5_S 0x00d8
236#define AR_IMR_S5 0x00b8 249#define AR_IMR_S5 0x00b8
237#define AR_IMR_S5_TIM_TIMER 0x00000010 250#define AR_IMR_S5_TIM_TIMER 0x00000010
238#define AR_IMR_S5_DTIM_TIMER 0x00000020 251#define AR_IMR_S5_DTIM_TIMER 0x00000020
@@ -240,7 +253,6 @@
240#define AR_ISR_S5_GENTIMER_TRIG_S 0 253#define AR_ISR_S5_GENTIMER_TRIG_S 0
241#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000 254#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
242#define AR_ISR_S5_GENTIMER_THRESH_S 16 255#define AR_ISR_S5_GENTIMER_THRESH_S 16
243#define AR_ISR_S5_S 0x00d8
244#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80 256#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
245#define AR_IMR_S5_GENTIMER_TRIG_S 0 257#define AR_IMR_S5_GENTIMER_TRIG_S 0
246#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000 258#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
@@ -249,6 +261,8 @@
249#define AR_IMR 0x00a0 261#define AR_IMR 0x00a0
250#define AR_IMR_RXOK 0x00000001 262#define AR_IMR_RXOK 0x00000001
251#define AR_IMR_RXDESC 0x00000002 263#define AR_IMR_RXDESC 0x00000002
264#define AR_IMR_RXOK_HP 0x00000001
265#define AR_IMR_RXOK_LP 0x00000002
252#define AR_IMR_RXERR 0x00000004 266#define AR_IMR_RXERR 0x00000004
253#define AR_IMR_RXNOPKT 0x00000008 267#define AR_IMR_RXNOPKT 0x00000008
254#define AR_IMR_RXEOL 0x00000010 268#define AR_IMR_RXEOL 0x00000010
@@ -332,10 +346,10 @@
332#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 346#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
333#define AR_ISR_S1_QCU_TXEOL_S 16 347#define AR_ISR_S1_QCU_TXEOL_S 16
334 348
335#define AR_ISR_S2_S 0x00cc 349#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
336#define AR_ISR_S3_S 0x00d0 350#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
337#define AR_ISR_S4_S 0x00d4 351#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
338#define AR_ISR_S5_S 0x00d8 352#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
339#define AR_DMADBG_0 0x00e0 353#define AR_DMADBG_0 0x00e0
340#define AR_DMADBG_1 0x00e4 354#define AR_DMADBG_1 0x00e4
341#define AR_DMADBG_2 0x00e8 355#define AR_DMADBG_2 0x00e8
@@ -369,6 +383,9 @@
369#define AR_Q9_TXDP 0x0824 383#define AR_Q9_TXDP 0x0824
370#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2)) 384#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
371 385
386#define AR_Q_STATUS_RING_START 0x830
387#define AR_Q_STATUS_RING_END 0x834
388
372#define AR_Q_TXE 0x0840 389#define AR_Q_TXE 0x0840
373#define AR_Q_TXE_M 0x000003FF 390#define AR_Q_TXE_M 0x000003FF
374 391
@@ -461,6 +478,10 @@
461#define AR_Q_RDYTIMESHDN 0x0a40 478#define AR_Q_RDYTIMESHDN 0x0a40
462#define AR_Q_RDYTIMESHDN_M 0x000003FF 479#define AR_Q_RDYTIMESHDN_M 0x000003FF
463 480
481/* MAC Descriptor CRC check */
482#define AR_Q_DESC_CRCCHK 0xa44
483/* Enable CRC check on the descriptor fetched from host */
484#define AR_Q_DESC_CRCCHK_EN 1
464 485
465#define AR_NUM_DCU 10 486#define AR_NUM_DCU 10
466#define AR_DCU_0 0x0001 487#define AR_DCU_0 0x0001
@@ -759,6 +780,8 @@
759#define AR_SREV_VERSION_9271 0x140 780#define AR_SREV_VERSION_9271 0x140
760#define AR_SREV_REVISION_9271_10 0 781#define AR_SREV_REVISION_9271_10 0
761#define AR_SREV_REVISION_9271_11 1 782#define AR_SREV_REVISION_9271_11 1
783#define AR_SREV_VERSION_9300 0x1c0
784#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
762 785
763#define AR_SREV_5416(_ah) \ 786#define AR_SREV_5416(_ah) \
764 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 787 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -844,6 +867,15 @@
844#define AR_SREV_9271_11(_ah) \ 867#define AR_SREV_9271_11(_ah) \
845 (AR_SREV_9271(_ah) && \ 868 (AR_SREV_9271(_ah) && \
846 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) 869 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
870#define AR_SREV_9300(_ah) \
871 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
872#define AR_SREV_9300_20(_ah) \
873 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
874 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
875#define AR_SREV_9300_20_OR_LATER(_ah) \
876 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
877 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
878 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
847 879
848#define AR_SREV_9285E_20(_ah) \ 880#define AR_SREV_9285E_20(_ah) \
849 (AR_SREV_9285_12_OR_LATER(_ah) && \ 881 (AR_SREV_9285_12_OR_LATER(_ah) && \
@@ -945,6 +977,7 @@ enum {
945#define AR9285_NUM_GPIO 12 977#define AR9285_NUM_GPIO 12
946#define AR9287_NUM_GPIO 11 978#define AR9287_NUM_GPIO 11
947#define AR9271_NUM_GPIO 16 979#define AR9271_NUM_GPIO 16
980#define AR9300_NUM_GPIO 17
948 981
949#define AR_GPIO_IN_OUT 0x4048 982#define AR_GPIO_IN_OUT 0x4048
950#define AR_GPIO_IN_VAL 0x0FFFC000 983#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -957,19 +990,21 @@ enum {
957#define AR9287_GPIO_IN_VAL_S 11 990#define AR9287_GPIO_IN_VAL_S 11
958#define AR9271_GPIO_IN_VAL 0xFFFF0000 991#define AR9271_GPIO_IN_VAL 0xFFFF0000
959#define AR9271_GPIO_IN_VAL_S 16 992#define AR9271_GPIO_IN_VAL_S 16
993#define AR9300_GPIO_IN_VAL 0x0001FFFF
994#define AR9300_GPIO_IN_VAL_S 0
960 995
961#define AR_GPIO_OE_OUT 0x404c 996#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
962#define AR_GPIO_OE_OUT_DRV 0x3 997#define AR_GPIO_OE_OUT_DRV 0x3
963#define AR_GPIO_OE_OUT_DRV_NO 0x0 998#define AR_GPIO_OE_OUT_DRV_NO 0x0
964#define AR_GPIO_OE_OUT_DRV_LOW 0x1 999#define AR_GPIO_OE_OUT_DRV_LOW 0x1
965#define AR_GPIO_OE_OUT_DRV_HI 0x2 1000#define AR_GPIO_OE_OUT_DRV_HI 0x2
966#define AR_GPIO_OE_OUT_DRV_ALL 0x3 1001#define AR_GPIO_OE_OUT_DRV_ALL 0x3
967 1002
968#define AR_GPIO_INTR_POL 0x4050 1003#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
969#define AR_GPIO_INTR_POL_VAL 0x00001FFF 1004#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
970#define AR_GPIO_INTR_POL_VAL_S 0 1005#define AR_GPIO_INTR_POL_VAL_S 0
971 1006
972#define AR_GPIO_INPUT_EN_VAL 0x4054 1007#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)
973#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004 1008#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
974#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2 1009#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
975#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008 1010#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -987,13 +1022,13 @@ enum {
987#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 1022#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
988#define AR_GPIO_JTAG_DISABLE 0x00020000 1023#define AR_GPIO_JTAG_DISABLE 0x00020000
989 1024
990#define AR_GPIO_INPUT_MUX1 0x4058 1025#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)
991#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000 1026#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
992#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16 1027#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
993#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00 1028#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
994#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8 1029#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
995 1030
996#define AR_GPIO_INPUT_MUX2 0x405c 1031#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)
997#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f 1032#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
998#define AR_GPIO_INPUT_MUX2_CLK25_S 0 1033#define AR_GPIO_INPUT_MUX2_CLK25_S 0
999#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 1034#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -1001,13 +1036,13 @@ enum {
1001#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 1036#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
1002#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 1037#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
1003 1038
1004#define AR_GPIO_OUTPUT_MUX1 0x4060 1039#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)
1005#define AR_GPIO_OUTPUT_MUX2 0x4064 1040#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)
1006#define AR_GPIO_OUTPUT_MUX3 0x4068 1041#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)
1007 1042
1008#define AR_INPUT_STATE 0x406c 1043#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)
1009 1044
1010#define AR_EEPROM_STATUS_DATA 0x407c 1045#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)
1011#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff 1046#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
1012#define AR_EEPROM_STATUS_DATA_VAL_S 0 1047#define AR_EEPROM_STATUS_DATA_VAL_S 0
1013#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 1048#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1015,13 +1050,24 @@ enum {
1015#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 1050#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
1016#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 1051#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
1017 1052
1018#define AR_OBS 0x4080 1053#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)
1019 1054
1020#define AR_GPIO_PDPU 0x4088 1055#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
1021 1056
1022#define AR_PCIE_MSI 0x4094 1057#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)
1023#define AR_PCIE_MSI_ENABLE 0x00000001 1058#define AR_PCIE_MSI_ENABLE 0x00000001
1024 1059
1060#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4
1061#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
1062#define AR_INTR_PRIO_SYNC_MASK 0x40cc
1063#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
1064
1065#define AR_RTC_9300_PLL_DIV 0x000003ff
1066#define AR_RTC_9300_PLL_DIV_S 0
1067#define AR_RTC_9300_PLL_REFDIV 0x00003C00
1068#define AR_RTC_9300_PLL_REFDIV_S 10
1069#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
1070#define AR_RTC_9300_PLL_CLKSEL_S 14
1025 1071
1026#define AR_RTC_9160_PLL_DIV 0x000003ff 1072#define AR_RTC_9160_PLL_DIV 0x000003ff
1027#define AR_RTC_9160_PLL_DIV_S 0 1073#define AR_RTC_9160_PLL_DIV_S 0
@@ -1039,6 +1085,16 @@ enum {
1039#define AR_RTC_RC_COLD_RESET 0x00000004 1085#define AR_RTC_RC_COLD_RESET 0x00000004
1040#define AR_RTC_RC_WARM_RESET 0x00000008 1086#define AR_RTC_RC_WARM_RESET 0x00000008
1041 1087
1088/* Crystal Control */
1089#define AR_RTC_XTAL_CONTROL 0x7004
1090
1091/* Reg Control 0 */
1092#define AR_RTC_REG_CONTROL0 0x7008
1093
1094/* Reg Control 1 */
1095#define AR_RTC_REG_CONTROL1 0x700c
1096#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
1097
1042#define AR_RTC_PLL_CONTROL \ 1098#define AR_RTC_PLL_CONTROL \
1043 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) 1099 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
1044 1100
@@ -1069,6 +1125,7 @@ enum {
1069#define AR_RTC_SLEEP_CLK \ 1125#define AR_RTC_SLEEP_CLK \
1070 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) 1126 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
1071#define AR_RTC_FORCE_DERIVED_CLK 0x2 1127#define AR_RTC_FORCE_DERIVED_CLK 0x2
1128#define AR_RTC_FORCE_SWREG_PRD 0x00000004
1072 1129
1073#define AR_RTC_FORCE_WAKE \ 1130#define AR_RTC_FORCE_WAKE \
1074 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) 1131 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
@@ -1533,7 +1590,7 @@ enum {
1533#define AR_TSFOOR_THRESHOLD 0x813c 1590#define AR_TSFOOR_THRESHOLD 0x813c
1534#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF 1591#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
1535 1592
1536#define AR_PHY_ERR_EIFS_MASK 8144 1593#define AR_PHY_ERR_EIFS_MASK 0x8144
1537 1594
1538#define AR_PHY_ERR_3 0x8168 1595#define AR_PHY_ERR_3 0x8168
1539#define AR_PHY_ERR_3_COUNT 0x00FFFFFF 1596#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
@@ -1599,24 +1656,26 @@ enum {
1599#define AR_FIRST_NDP_TIMER 7 1656#define AR_FIRST_NDP_TIMER 7
1600#define AR_NDP2_PERIOD 0x81a0 1657#define AR_NDP2_PERIOD 0x81a0
1601#define AR_NDP2_TIMER_MODE 0x81c0 1658#define AR_NDP2_TIMER_MODE 0x81c0
1602#define AR_NEXT_TBTT_TIMER 0x8200 1659
1603#define AR_NEXT_DMA_BEACON_ALERT 0x8204 1660#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
1604#define AR_NEXT_SWBA 0x8208 1661#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
1605#define AR_NEXT_CFP 0x8208 1662#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
1606#define AR_NEXT_HCF 0x820C 1663#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
1607#define AR_NEXT_TIM 0x8210 1664#define AR_NEXT_CFP AR_GEN_TIMERS(2)
1608#define AR_NEXT_DTIM 0x8214 1665#define AR_NEXT_HCF AR_GEN_TIMERS(3)
1609#define AR_NEXT_QUIET_TIMER 0x8218 1666#define AR_NEXT_TIM AR_GEN_TIMERS(4)
1610#define AR_NEXT_NDP_TIMER 0x821C 1667#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
1611 1668#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
1612#define AR_BEACON_PERIOD 0x8220 1669#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
1613#define AR_DMA_BEACON_PERIOD 0x8224 1670
1614#define AR_SWBA_PERIOD 0x8228 1671#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
1615#define AR_HCF_PERIOD 0x822C 1672#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
1616#define AR_TIM_PERIOD 0x8230 1673#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
1617#define AR_DTIM_PERIOD 0x8234 1674#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
1618#define AR_QUIET_PERIOD 0x8238 1675#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
1619#define AR_NDP_PERIOD 0x823C 1676#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
1677#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
1678#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
1620 1679
1621#define AR_TIMER_MODE 0x8240 1680#define AR_TIMER_MODE 0x8240
1622#define AR_TBTT_TIMER_EN 0x00000001 1681#define AR_TBTT_TIMER_EN 0x00000001
@@ -1730,4 +1789,32 @@ enum {
1730#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */ 1789#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
1731#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */ 1790#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
1732 1791
1792#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
1793#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the Multicast search
1794 * based on both MAC Address and Key ID.
1795 * If bit is 0, then Multicast search is
1796 * based on MAC address only.
1797 * For Merlin and above only.
1798 */
1799#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables AGG_WEP feature,
1800 * when it is enable, AGG_WEP would takes
1801 * charge of the encryption interface of
1802 * pcu_txsm.
1803 */
1804
1805#define AR9300_SM_BASE 0xa200
1806#define AR9002_PHY_AGC_CONTROL 0x9860
1807#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
1808#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
1809#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
1810#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
1811#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
1812#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
1813#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
1814#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
1815#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
1816#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
1817#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
1818#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
1819
1733#endif 1820#endif
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index f2ff18cf3e60..e23172c9caaf 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -101,6 +101,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
101 wmi->drv_priv = priv; 101 wmi->drv_priv = priv;
102 wmi->stopped = false; 102 wmi->stopped = false;
103 mutex_init(&wmi->op_mutex); 103 mutex_init(&wmi->op_mutex);
104 mutex_init(&wmi->multi_write_mutex);
104 init_completion(&wmi->cmd_wait); 105 init_completion(&wmi->cmd_wait);
105 106
106 return wmi; 107 return wmi;
@@ -128,7 +129,7 @@ void ath9k_wmi_tasklet(unsigned long data)
128 void *wmi_event; 129 void *wmi_event;
129 unsigned long flags; 130 unsigned long flags;
130#ifdef CONFIG_ATH9K_HTC_DEBUGFS 131#ifdef CONFIG_ATH9K_HTC_DEBUGFS
131 u32 txrate; 132 __be32 txrate;
132#endif 133#endif
133 134
134 spin_lock_irqsave(&priv->wmi->wmi_lock, flags); 135 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
@@ -203,6 +204,14 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
203 return; 204 return;
204 } 205 }
205 206
207 /* Check if there has been a timeout. */
208 spin_lock(&wmi->wmi_lock);
209 if (cmd_id != wmi->last_cmd_id) {
210 spin_unlock(&wmi->wmi_lock);
211 goto free_skb;
212 }
213 spin_unlock(&wmi->wmi_lock);
214
206 /* WMI command response */ 215 /* WMI command response */
207 ath9k_wmi_rsp_callback(wmi, skb); 216 ath9k_wmi_rsp_callback(wmi, skb);
208 217
@@ -265,6 +274,10 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
265 struct sk_buff *skb; 274 struct sk_buff *skb;
266 u8 *data; 275 u8 *data;
267 int time_left, ret = 0; 276 int time_left, ret = 0;
277 unsigned long flags;
278
279 if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
280 return 0;
268 281
269 if (!wmi) 282 if (!wmi)
270 return -EINVAL; 283 return -EINVAL;
@@ -292,6 +305,10 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
292 wmi->cmd_rsp_buf = rsp_buf; 305 wmi->cmd_rsp_buf = rsp_buf;
293 wmi->cmd_rsp_len = rsp_len; 306 wmi->cmd_rsp_len = rsp_len;
294 307
308 spin_lock_irqsave(&wmi->wmi_lock, flags);
309 wmi->last_cmd_id = cmd_id;
310 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
311
295 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len); 312 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
296 if (ret) 313 if (ret)
297 goto out; 314 goto out;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 39ef926f27c2..765db5faa2d3 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -19,7 +19,7 @@
19 19
20 20
21struct wmi_event_txrate { 21struct wmi_event_txrate {
22 u32 txrate; 22 __be32 txrate;
23 struct { 23 struct {
24 u8 rssi_thresh; 24 u8 rssi_thresh;
25 u8 per; 25 u8 per;
@@ -27,8 +27,8 @@ struct wmi_event_txrate {
27} __packed; 27} __packed;
28 28
29struct wmi_cmd_hdr { 29struct wmi_cmd_hdr {
30 u16 command_id; 30 __be16 command_id;
31 u16 seq_no; 31 __be16 seq_no;
32} __packed; 32} __packed;
33 33
34struct wmi_swba { 34struct wmi_swba {
@@ -84,12 +84,20 @@ enum wmi_event_id {
84 WMI_TXRATE_EVENTID, 84 WMI_TXRATE_EVENTID,
85}; 85};
86 86
87#define MAX_CMD_NUMBER 62
88
89struct register_write {
90 __be32 reg;
91 __be32 val;
92};
93
87struct wmi { 94struct wmi {
88 struct ath9k_htc_priv *drv_priv; 95 struct ath9k_htc_priv *drv_priv;
89 struct htc_target *htc; 96 struct htc_target *htc;
90 enum htc_endpoint_id ctrl_epid; 97 enum htc_endpoint_id ctrl_epid;
91 struct mutex op_mutex; 98 struct mutex op_mutex;
92 struct completion cmd_wait; 99 struct completion cmd_wait;
100 enum wmi_cmd_id last_cmd_id;
93 u16 tx_seq_id; 101 u16 tx_seq_id;
94 u8 *cmd_rsp_buf; 102 u8 *cmd_rsp_buf;
95 u32 cmd_rsp_len; 103 u32 cmd_rsp_len;
@@ -97,6 +105,11 @@ struct wmi {
97 105
98 struct sk_buff *wmi_skb; 106 struct sk_buff *wmi_skb;
99 spinlock_t wmi_lock; 107 spinlock_t wmi_lock;
108
109 atomic_t mwrite_cnt;
110 struct register_write multi_write[MAX_CMD_NUMBER];
111 u32 multi_write_idx;
112 struct mutex multi_write_mutex;
100}; 113};
101 114
102struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); 115struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
@@ -113,14 +126,14 @@ void ath9k_wmi_tasklet(unsigned long data);
113 do { \ 126 do { \
114 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \ 127 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
115 (u8 *) &cmd_rsp, \ 128 (u8 *) &cmd_rsp, \
116 sizeof(cmd_rsp), HZ); \ 129 sizeof(cmd_rsp), HZ*2); \
117 } while (0) 130 } while (0)
118 131
119#define WMI_CMD_BUF(_wmi_cmd, _buf) \ 132#define WMI_CMD_BUF(_wmi_cmd, _buf) \
120 do { \ 133 do { \
121 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \ 134 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
122 (u8 *) _buf, sizeof(*_buf), \ 135 (u8 *) _buf, sizeof(*_buf), \
123 &cmd_rsp, sizeof(cmd_rsp), HZ); \ 136 &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
124 } while (0) 137 } while (0)
125 138
126#endif /* WMI_H */ 139#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 02df4cbf179f..3db19172b43b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -15,10 +15,11 @@
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18#include "ar9003_mac.h"
18 19
19#define BITS_PER_BYTE 8 20#define BITS_PER_BYTE 8
20#define OFDM_PLCP_BITS 22 21#define OFDM_PLCP_BITS 22
21#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f) 22#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8 24#define L_STF 8
24#define L_LTF 8 25#define L_LTF 8
@@ -33,7 +34,7 @@
33 34
34#define OFDM_SIFS_TIME 16 35#define OFDM_SIFS_TIME 16
35 36
36static u32 bits_per_symbol[][2] = { 37static u16 bits_per_symbol[][2] = {
37 /* 20MHz 40MHz */ 38 /* 20MHz 40MHz */
38 { 26, 54 }, /* 0: BPSK */ 39 { 26, 54 }, /* 0: BPSK */
39 { 52, 108 }, /* 1: QPSK 1/2 */ 40 { 52, 108 }, /* 1: QPSK 1/2 */
@@ -43,14 +44,6 @@ static u32 bits_per_symbol[][2] = {
43 { 208, 432 }, /* 5: 64-QAM 2/3 */ 44 { 208, 432 }, /* 5: 64-QAM 2/3 */
44 { 234, 486 }, /* 6: 64-QAM 3/4 */ 45 { 234, 486 }, /* 6: 64-QAM 3/4 */
45 { 260, 540 }, /* 7: 64-QAM 5/6 */ 46 { 260, 540 }, /* 7: 64-QAM 5/6 */
46 { 52, 108 }, /* 8: BPSK */
47 { 104, 216 }, /* 9: QPSK 1/2 */
48 { 156, 324 }, /* 10: QPSK 3/4 */
49 { 208, 432 }, /* 11: 16-QAM 1/2 */
50 { 312, 648 }, /* 12: 16-QAM 3/4 */
51 { 416, 864 }, /* 13: 64-QAM 2/3 */
52 { 468, 972 }, /* 14: 64-QAM 3/4 */
53 { 520, 1080 }, /* 15: 64-QAM 5/6 */
54}; 47};
55 48
56#define IS_HT_RATE(_rate) ((_rate) & 0x80) 49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
@@ -70,28 +63,39 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
70 int nbad, int txok, bool update_rc); 63 int nbad, int txok, bool update_rc);
71 64
72enum { 65enum {
73 MCS_DEFAULT, 66 MCS_HT20,
67 MCS_HT20_SGI,
74 MCS_HT40, 68 MCS_HT40,
75 MCS_HT40_SGI, 69 MCS_HT40_SGI,
76}; 70};
77 71
78static int ath_max_4ms_framelen[3][16] = { 72static int ath_max_4ms_framelen[4][32] = {
79 [MCS_DEFAULT] = { 73 [MCS_HT20] = {
80 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180, 74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
81 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320, 75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
82 }, 84 },
83 [MCS_HT40] = { 85 [MCS_HT40] = {
84 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840, 86 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
85 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600, 87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
86 }, 90 },
87 [MCS_HT40_SGI] = { 91 [MCS_HT40_SGI] = {
88 /* TODO: Only MCS 7 and 15 updated, recalculate the rest */ 92 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
89 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200, 93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
90 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400, 94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
91 } 96 }
92}; 97};
93 98
94
95/*********************/ 99/*********************/
96/* Aggregation logic */ 100/* Aggregation logic */
97/*********************/ 101/*********************/
@@ -261,25 +265,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
261 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 265 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
262} 266}
263 267
264static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 268static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
265{ 269{
266 struct ath_buf *tbf; 270 struct ath_buf *bf = NULL;
267 271
268 spin_lock_bh(&sc->tx.txbuflock); 272 spin_lock_bh(&sc->tx.txbuflock);
269 if (WARN_ON(list_empty(&sc->tx.txbuf))) { 273
274 if (unlikely(list_empty(&sc->tx.txbuf))) {
270 spin_unlock_bh(&sc->tx.txbuflock); 275 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL; 276 return NULL;
272 } 277 }
273 tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 278
274 list_del(&tbf->list); 279 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
280 list_del(&bf->list);
281
275 spin_unlock_bh(&sc->tx.txbuflock); 282 spin_unlock_bh(&sc->tx.txbuflock);
276 283
284 return bf;
285}
286
287static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
288{
289 spin_lock_bh(&sc->tx.txbuflock);
290 list_add_tail(&bf->list, &sc->tx.txbuf);
291 spin_unlock_bh(&sc->tx.txbuflock);
292}
293
294static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
295{
296 struct ath_buf *tbf;
297
298 tbf = ath_tx_get_buffer(sc);
299 if (WARN_ON(!tbf))
300 return NULL;
301
277 ATH_TXBUF_RESET(tbf); 302 ATH_TXBUF_RESET(tbf);
278 303
279 tbf->aphy = bf->aphy; 304 tbf->aphy = bf->aphy;
280 tbf->bf_mpdu = bf->bf_mpdu; 305 tbf->bf_mpdu = bf->bf_mpdu;
281 tbf->bf_buf_addr = bf->bf_buf_addr; 306 tbf->bf_buf_addr = bf->bf_buf_addr;
282 *(tbf->bf_desc) = *(bf->bf_desc); 307 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
283 tbf->bf_state = bf->bf_state; 308 tbf->bf_state = bf->bf_state;
284 tbf->bf_dmacontext = bf->bf_dmacontext; 309 tbf->bf_dmacontext = bf->bf_dmacontext;
285 310
@@ -359,7 +384,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
359 acked_cnt++; 384 acked_cnt++;
360 } else { 385 } else {
361 if (!(tid->state & AGGR_CLEANUP) && 386 if (!(tid->state & AGGR_CLEANUP) &&
362 ts->ts_flags != ATH9K_TX_SW_ABORTED) { 387 !bf_last->bf_tx_aborted) {
363 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 388 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
364 ath_tx_set_retry(sc, txq, bf); 389 ath_tx_set_retry(sc, txq, bf);
365 txpending = 1; 390 txpending = 1;
@@ -378,7 +403,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
378 } 403 }
379 } 404 }
380 405
381 if (bf_next == NULL) { 406 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
407 bf_next == NULL) {
382 /* 408 /*
383 * Make sure the last desc is reclaimed if it 409 * Make sure the last desc is reclaimed if it
384 * not a holding desc. 410 * not a holding desc.
@@ -412,36 +438,43 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
412 !txfail, sendbar); 438 !txfail, sendbar);
413 } else { 439 } else {
414 /* retry the un-acked ones */ 440 /* retry the un-acked ones */
415 if (bf->bf_next == NULL && bf_last->bf_stale) { 441 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
416 struct ath_buf *tbf; 442 if (bf->bf_next == NULL && bf_last->bf_stale) {
417 443 struct ath_buf *tbf;
418 tbf = ath_clone_txbuf(sc, bf_last); 444
419 /* 445 tbf = ath_clone_txbuf(sc, bf_last);
420 * Update tx baw and complete the frame with 446 /*
421 * failed status if we run out of tx buf 447 * Update tx baw and complete the
422 */ 448 * frame with failed status if we
423 if (!tbf) { 449 * run out of tx buf.
424 spin_lock_bh(&txq->axq_lock); 450 */
425 ath_tx_update_baw(sc, tid, 451 if (!tbf) {
426 bf->bf_seqno); 452 spin_lock_bh(&txq->axq_lock);
427 spin_unlock_bh(&txq->axq_lock); 453 ath_tx_update_baw(sc, tid,
428 454 bf->bf_seqno);
429 bf->bf_state.bf_type |= BUF_XRETRY; 455 spin_unlock_bh(&txq->axq_lock);
430 ath_tx_rc_status(bf, ts, nbad, 456
431 0, false); 457 bf->bf_state.bf_type |=
432 ath_tx_complete_buf(sc, bf, txq, 458 BUF_XRETRY;
433 &bf_head, ts, 0, 0); 459 ath_tx_rc_status(bf, ts, nbad,
434 break; 460 0, false);
461 ath_tx_complete_buf(sc, bf, txq,
462 &bf_head,
463 ts, 0, 0);
464 break;
465 }
466
467 ath9k_hw_cleartxdesc(sc->sc_ah,
468 tbf->bf_desc);
469 list_add_tail(&tbf->list, &bf_head);
470 } else {
471 /*
472 * Clear descriptor status words for
473 * software retry
474 */
475 ath9k_hw_cleartxdesc(sc->sc_ah,
476 bf->bf_desc);
435 } 477 }
436
437 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
438 list_add_tail(&tbf->list, &bf_head);
439 } else {
440 /*
441 * Clear descriptor status words for
442 * software retry
443 */
444 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
445 } 478 }
446 479
447 /* 480 /*
@@ -509,12 +542,13 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
509 break; 542 break;
510 } 543 }
511 544
512 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 545 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
513 modeidx = MCS_HT40_SGI;
514 else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
515 modeidx = MCS_HT40; 546 modeidx = MCS_HT40;
516 else 547 else
517 modeidx = MCS_DEFAULT; 548 modeidx = MCS_HT20;
549
550 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
551 modeidx++;
518 552
519 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; 553 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
520 max_4ms_framelen = min(max_4ms_framelen, frmlen); 554 max_4ms_framelen = min(max_4ms_framelen, frmlen);
@@ -559,7 +593,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
559 u32 nsymbits, nsymbols; 593 u32 nsymbits, nsymbols;
560 u16 minlen; 594 u16 minlen;
561 u8 flags, rix; 595 u8 flags, rix;
562 int width, half_gi, ndelim, mindelim; 596 int width, streams, half_gi, ndelim, mindelim;
563 597
564 /* Select standard number of delimiters based on frame length alone */ 598 /* Select standard number of delimiters based on frame length alone */
565 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 599 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -599,7 +633,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
599 if (nsymbols == 0) 633 if (nsymbols == 0)
600 nsymbols = 1; 634 nsymbols = 1;
601 635
602 nsymbits = bits_per_symbol[rix][width]; 636 streams = HT_RC_2_STREAMS(rix);
637 nsymbits = bits_per_symbol[rix % 8][width] * streams;
603 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 638 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
604 639
605 if (frmlen < minlen) { 640 if (frmlen < minlen) {
@@ -665,7 +700,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
665 bpad = PADBYTES(al_delta) + (ndelim << 2); 700 bpad = PADBYTES(al_delta) + (ndelim << 2);
666 701
667 bf->bf_next = NULL; 702 bf->bf_next = NULL;
668 bf->bf_desc->ds_link = 0; 703 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
669 704
670 /* link buffers of this frame to the aggregate */ 705 /* link buffers of this frame to the aggregate */
671 ath_tx_addto_baw(sc, tid, bf); 706 ath_tx_addto_baw(sc, tid, bf);
@@ -673,7 +708,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
673 list_move_tail(&bf->list, bf_q); 708 list_move_tail(&bf->list, bf_q);
674 if (bf_prev) { 709 if (bf_prev) {
675 bf_prev->bf_next = bf; 710 bf_prev->bf_next = bf;
676 bf_prev->bf_desc->ds_link = bf->bf_daddr; 711 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
712 bf->bf_daddr);
677 } 713 }
678 bf_prev = bf; 714 bf_prev = bf;
679 715
@@ -853,7 +889,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
853 struct ath_hw *ah = sc->sc_ah; 889 struct ath_hw *ah = sc->sc_ah;
854 struct ath_common *common = ath9k_hw_common(ah); 890 struct ath_common *common = ath9k_hw_common(ah);
855 struct ath9k_tx_queue_info qi; 891 struct ath9k_tx_queue_info qi;
856 int qnum; 892 int qnum, i;
857 893
858 memset(&qi, 0, sizeof(qi)); 894 memset(&qi, 0, sizeof(qi));
859 qi.tqi_subtype = subtype; 895 qi.tqi_subtype = subtype;
@@ -877,11 +913,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
877 * The UAPSD queue is an exception, since we take a desc- 913 * The UAPSD queue is an exception, since we take a desc-
878 * based intr on the EOSP frames. 914 * based intr on the EOSP frames.
879 */ 915 */
880 if (qtype == ATH9K_TX_QUEUE_UAPSD) 916 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
881 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 917 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
882 else 918 TXQ_FLAG_TXERRINT_ENABLE;
883 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 919 } else {
884 TXQ_FLAG_TXDESCINT_ENABLE; 920 if (qtype == ATH9K_TX_QUEUE_UAPSD)
921 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
922 else
923 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
924 TXQ_FLAG_TXDESCINT_ENABLE;
925 }
885 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 926 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
886 if (qnum == -1) { 927 if (qnum == -1) {
887 /* 928 /*
@@ -908,6 +949,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
908 txq->axq_depth = 0; 949 txq->axq_depth = 0;
909 txq->axq_tx_inprogress = false; 950 txq->axq_tx_inprogress = false;
910 sc->tx.txqsetup |= 1<<qnum; 951 sc->tx.txqsetup |= 1<<qnum;
952
953 txq->txq_headidx = txq->txq_tailidx = 0;
954 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
955 INIT_LIST_HEAD(&txq->txq_fifo[i]);
956 INIT_LIST_HEAD(&txq->txq_fifo_pending);
911 } 957 }
912 return &sc->tx.txq[qnum]; 958 return &sc->tx.txq[qnum];
913} 959}
@@ -1035,36 +1081,52 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1035 struct ath_tx_status ts; 1081 struct ath_tx_status ts;
1036 1082
1037 memset(&ts, 0, sizeof(ts)); 1083 memset(&ts, 0, sizeof(ts));
1038 if (!retry_tx)
1039 ts.ts_flags = ATH9K_TX_SW_ABORTED;
1040
1041 INIT_LIST_HEAD(&bf_head); 1084 INIT_LIST_HEAD(&bf_head);
1042 1085
1043 for (;;) { 1086 for (;;) {
1044 spin_lock_bh(&txq->axq_lock); 1087 spin_lock_bh(&txq->axq_lock);
1045 1088
1046 if (list_empty(&txq->axq_q)) { 1089 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1047 txq->axq_link = NULL; 1090 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1048 spin_unlock_bh(&txq->axq_lock); 1091 txq->txq_headidx = txq->txq_tailidx = 0;
1049 break; 1092 spin_unlock_bh(&txq->axq_lock);
1050 } 1093 break;
1051 1094 } else {
1052 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 1095 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1096 struct ath_buf, list);
1097 }
1098 } else {
1099 if (list_empty(&txq->axq_q)) {
1100 txq->axq_link = NULL;
1101 spin_unlock_bh(&txq->axq_lock);
1102 break;
1103 }
1104 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1105 list);
1053 1106
1054 if (bf->bf_stale) { 1107 if (bf->bf_stale) {
1055 list_del(&bf->list); 1108 list_del(&bf->list);
1056 spin_unlock_bh(&txq->axq_lock); 1109 spin_unlock_bh(&txq->axq_lock);
1057 1110
1058 spin_lock_bh(&sc->tx.txbuflock); 1111 ath_tx_return_buffer(sc, bf);
1059 list_add_tail(&bf->list, &sc->tx.txbuf); 1112 continue;
1060 spin_unlock_bh(&sc->tx.txbuflock); 1113 }
1061 continue;
1062 } 1114 }
1063 1115
1064 lastbf = bf->bf_lastbf; 1116 lastbf = bf->bf_lastbf;
1117 if (!retry_tx)
1118 lastbf->bf_tx_aborted = true;
1119
1120 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1121 list_cut_position(&bf_head,
1122 &txq->txq_fifo[txq->txq_tailidx],
1123 &lastbf->list);
1124 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1125 } else {
1126 /* remove ath_buf's of the same mpdu from txq */
1127 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1128 }
1065 1129
1066 /* remove ath_buf's of the same mpdu from txq */
1067 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1068 txq->axq_depth--; 1130 txq->axq_depth--;
1069 1131
1070 spin_unlock_bh(&txq->axq_lock); 1132 spin_unlock_bh(&txq->axq_lock);
@@ -1087,6 +1149,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1087 spin_unlock_bh(&txq->axq_lock); 1149 spin_unlock_bh(&txq->axq_lock);
1088 } 1150 }
1089 } 1151 }
1152
1153 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1154 spin_lock_bh(&txq->axq_lock);
1155 while (!list_empty(&txq->txq_fifo_pending)) {
1156 bf = list_first_entry(&txq->txq_fifo_pending,
1157 struct ath_buf, list);
1158 list_cut_position(&bf_head,
1159 &txq->txq_fifo_pending,
1160 &bf->bf_lastbf->list);
1161 spin_unlock_bh(&txq->axq_lock);
1162
1163 if (bf_isampdu(bf))
1164 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1165 &ts, 0);
1166 else
1167 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1168 &ts, 0, 0);
1169 spin_lock_bh(&txq->axq_lock);
1170 }
1171 spin_unlock_bh(&txq->axq_lock);
1172 }
1090} 1173}
1091 1174
1092void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1175void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1224,44 +1307,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1224 1307
1225 bf = list_first_entry(head, struct ath_buf, list); 1308 bf = list_first_entry(head, struct ath_buf, list);
1226 1309
1227 list_splice_tail_init(head, &txq->axq_q);
1228 txq->axq_depth++;
1229
1230 ath_print(common, ATH_DBG_QUEUE, 1310 ath_print(common, ATH_DBG_QUEUE,
1231 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1311 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1232 1312
1233 if (txq->axq_link == NULL) { 1313 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1314 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1315 list_splice_tail_init(head, &txq->txq_fifo_pending);
1316 return;
1317 }
1318 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1319 ath_print(common, ATH_DBG_XMIT,
1320 "Initializing tx fifo %d which "
1321 "is non-empty\n",
1322 txq->txq_headidx);
1323 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1324 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1325 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1234 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1326 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1235 ath_print(common, ATH_DBG_XMIT, 1327 ath_print(common, ATH_DBG_XMIT,
1236 "TXDP[%u] = %llx (%p)\n", 1328 "TXDP[%u] = %llx (%p)\n",
1237 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1329 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1238 } else { 1330 } else {
1239 *txq->axq_link = bf->bf_daddr; 1331 list_splice_tail_init(head, &txq->axq_q);
1240 ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1241 txq->axq_qnum, txq->axq_link,
1242 ito64(bf->bf_daddr), bf->bf_desc);
1243 }
1244 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1245 ath9k_hw_txstart(ah, txq->axq_qnum);
1246}
1247
1248static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
1249{
1250 struct ath_buf *bf = NULL;
1251 1332
1252 spin_lock_bh(&sc->tx.txbuflock); 1333 if (txq->axq_link == NULL) {
1253 1334 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1254 if (unlikely(list_empty(&sc->tx.txbuf))) { 1335 ath_print(common, ATH_DBG_XMIT,
1255 spin_unlock_bh(&sc->tx.txbuflock); 1336 "TXDP[%u] = %llx (%p)\n",
1256 return NULL; 1337 txq->axq_qnum, ito64(bf->bf_daddr),
1338 bf->bf_desc);
1339 } else {
1340 *txq->axq_link = bf->bf_daddr;
1341 ath_print(common, ATH_DBG_XMIT,
1342 "link[%u] (%p)=%llx (%p)\n",
1343 txq->axq_qnum, txq->axq_link,
1344 ito64(bf->bf_daddr), bf->bf_desc);
1345 }
1346 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1347 &txq->axq_link);
1348 ath9k_hw_txstart(ah, txq->axq_qnum);
1257 } 1349 }
1258 1350 txq->axq_depth++;
1259 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
1260 list_del(&bf->list);
1261
1262 spin_unlock_bh(&sc->tx.txbuflock);
1263
1264 return bf;
1265} 1351}
1266 1352
1267static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1353static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1408,8 +1494,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
1408 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1494 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1409} 1495}
1410 1496
1411static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, 1497static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1412 struct ath_txq *txq)
1413{ 1498{
1414 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1499 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1415 int flags = 0; 1500 int flags = 0;
@@ -1420,6 +1505,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
1420 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1505 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1421 flags |= ATH9K_TXDESC_NOACK; 1506 flags |= ATH9K_TXDESC_NOACK;
1422 1507
1508 if (use_ldpc)
1509 flags |= ATH9K_TXDESC_LDPC;
1510
1423 return flags; 1511 return flags;
1424} 1512}
1425 1513
@@ -1438,8 +1526,9 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1438 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; 1526 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1439 1527
1440 /* find number of symbols: PLCP + data */ 1528 /* find number of symbols: PLCP + data */
1529 streams = HT_RC_2_STREAMS(rix);
1441 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 1530 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1442 nsymbits = bits_per_symbol[rix][width]; 1531 nsymbits = bits_per_symbol[rix % 8][width] * streams;
1443 nsymbols = (nbits + nsymbits - 1) / nsymbits; 1532 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1444 1533
1445 if (!half_gi) 1534 if (!half_gi)
@@ -1448,7 +1537,6 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1448 duration = SYMBOL_TIME_HALFGI(nsymbols); 1537 duration = SYMBOL_TIME_HALFGI(nsymbols);
1449 1538
1450 /* addup duration for legacy/ht training and signal fields */ 1539 /* addup duration for legacy/ht training and signal fields */
1451 streams = HT_RC_2_STREAMS(rix);
1452 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 1540 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1453 1541
1454 return duration; 1542 return duration;
@@ -1519,6 +1607,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1519 series[i].Rate = rix | 0x80; 1607 series[i].Rate = rix | 0x80;
1520 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1608 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1521 is_40, is_sgi, is_sp); 1609 is_40, is_sgi, is_sp);
1610 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1611 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1522 continue; 1612 continue;
1523 } 1613 }
1524 1614
@@ -1571,6 +1661,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1571 int hdrlen; 1661 int hdrlen;
1572 __le16 fc; 1662 __le16 fc;
1573 int padpos, padsize; 1663 int padpos, padsize;
1664 bool use_ldpc = false;
1574 1665
1575 tx_info->pad[0] = 0; 1666 tx_info->pad[0] = 0;
1576 switch (txctl->frame_type) { 1667 switch (txctl->frame_type) {
@@ -1597,10 +1688,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1597 bf->bf_frmlen -= padsize; 1688 bf->bf_frmlen -= padsize;
1598 } 1689 }
1599 1690
1600 if (conf_is_ht(&hw->conf)) 1691 if (conf_is_ht(&hw->conf)) {
1601 bf->bf_state.bf_type |= BUF_HT; 1692 bf->bf_state.bf_type |= BUF_HT;
1693 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1694 use_ldpc = true;
1695 }
1602 1696
1603 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1697 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1604 1698
1605 bf->bf_keytype = get_hw_crypto_keytype(skb); 1699 bf->bf_keytype = get_hw_crypto_keytype(skb);
1606 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1700 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1659,8 +1753,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1659 list_add_tail(&bf->list, &bf_head); 1753 list_add_tail(&bf->list, &bf_head);
1660 1754
1661 ds = bf->bf_desc; 1755 ds = bf->bf_desc;
1662 ds->ds_link = 0; 1756 ath9k_hw_set_desc_link(ah, ds, 0);
1663 ds->ds_data = bf->bf_buf_addr;
1664 1757
1665 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1758 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1666 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1759 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1669,7 +1762,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1669 skb->len, /* segment length */ 1762 skb->len, /* segment length */
1670 true, /* first segment */ 1763 true, /* first segment */
1671 true, /* last segment */ 1764 true, /* last segment */
1672 ds); /* first descriptor */ 1765 ds, /* first descriptor */
1766 bf->bf_buf_addr,
1767 txctl->txq->axq_qnum);
1673 1768
1674 spin_lock_bh(&txctl->txq->axq_lock); 1769 spin_lock_bh(&txctl->txq->axq_lock);
1675 1770
@@ -1738,9 +1833,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1738 } 1833 }
1739 spin_unlock_bh(&txq->axq_lock); 1834 spin_unlock_bh(&txq->axq_lock);
1740 1835
1741 spin_lock_bh(&sc->tx.txbuflock); 1836 ath_tx_return_buffer(sc, bf);
1742 list_add_tail(&bf->list, &sc->tx.txbuf);
1743 spin_unlock_bh(&sc->tx.txbuflock);
1744 1837
1745 return r; 1838 return r;
1746 } 1839 }
@@ -1896,7 +1989,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1896 int nbad = 0; 1989 int nbad = 0;
1897 int isaggr = 0; 1990 int isaggr = 0;
1898 1991
1899 if (ts->ts_flags == ATH9K_TX_SW_ABORTED) 1992 if (bf->bf_tx_aborted)
1900 return 0; 1993 return 0;
1901 1994
1902 isaggr = bf_isaggr(bf); 1995 isaggr = bf_isaggr(bf);
@@ -2054,13 +2147,12 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2054 txq->axq_depth--; 2147 txq->axq_depth--;
2055 txok = !(ts.ts_status & ATH9K_TXERR_MASK); 2148 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
2056 txq->axq_tx_inprogress = false; 2149 txq->axq_tx_inprogress = false;
2150 if (bf_held)
2151 list_del(&bf_held->list);
2057 spin_unlock_bh(&txq->axq_lock); 2152 spin_unlock_bh(&txq->axq_lock);
2058 2153
2059 if (bf_held) { 2154 if (bf_held)
2060 spin_lock_bh(&sc->tx.txbuflock); 2155 ath_tx_return_buffer(sc, bf_held);
2061 list_move_tail(&bf_held->list, &sc->tx.txbuf);
2062 spin_unlock_bh(&sc->tx.txbuflock);
2063 }
2064 2156
2065 if (!bf_isampdu(bf)) { 2157 if (!bf_isampdu(bf)) {
2066 /* 2158 /*
@@ -2138,10 +2230,121 @@ void ath_tx_tasklet(struct ath_softc *sc)
2138 } 2230 }
2139} 2231}
2140 2232
2233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
2235 struct ath_tx_status txs;
2236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
2242 int txok;
2243
2244 for (;;) {
2245 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2246 if (status == -EINPROGRESS)
2247 break;
2248 if (status == -EIO) {
2249 ath_print(common, ATH_DBG_XMIT,
2250 "Error processing tx status\n");
2251 break;
2252 }
2253
2254 /* Skip beacon completions */
2255 if (txs.qid == sc->beacon.beaconq)
2256 continue;
2257
2258 txq = &sc->tx.txq[txs.qid];
2259
2260 spin_lock_bh(&txq->axq_lock);
2261 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2262 spin_unlock_bh(&txq->axq_lock);
2263 return;
2264 }
2265
2266 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2267 struct ath_buf, list);
2268 lastbf = bf->bf_lastbf;
2269
2270 INIT_LIST_HEAD(&bf_head);
2271 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2272 &lastbf->list);
2273 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2274 txq->axq_depth--;
2275 txq->axq_tx_inprogress = false;
2276 spin_unlock_bh(&txq->axq_lock);
2277
2278 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2279
2280 if (!bf_isampdu(bf)) {
2281 bf->bf_retries = txs.ts_longretry;
2282 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2283 bf->bf_state.bf_type |= BUF_XRETRY;
2284 ath_tx_rc_status(bf, &txs, 0, txok, true);
2285 }
2286
2287 if (bf_isampdu(bf))
2288 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2289 else
2290 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2291 &txs, txok, 0);
2292
2293 ath_wake_mac80211_queue(sc, txq);
2294
2295 spin_lock_bh(&txq->axq_lock);
2296 if (!list_empty(&txq->txq_fifo_pending)) {
2297 INIT_LIST_HEAD(&bf_head);
2298 bf = list_first_entry(&txq->txq_fifo_pending,
2299 struct ath_buf, list);
2300 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2301 &bf->bf_lastbf->list);
2302 ath_tx_txqaddbuf(sc, txq, &bf_head);
2303 } else if (sc->sc_flags & SC_OP_TXAGGR)
2304 ath_txq_schedule(sc, txq);
2305 spin_unlock_bh(&txq->axq_lock);
2306 }
2307}
2308
2141/*****************/ 2309/*****************/
2142/* Init, Cleanup */ 2310/* Init, Cleanup */
2143/*****************/ 2311/*****************/
2144 2312
2313static int ath_txstatus_setup(struct ath_softc *sc, int size)
2314{
2315 struct ath_descdma *dd = &sc->txsdma;
2316 u8 txs_len = sc->sc_ah->caps.txs_len;
2317
2318 dd->dd_desc_len = size * txs_len;
2319 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2320 &dd->dd_desc_paddr, GFP_KERNEL);
2321 if (!dd->dd_desc)
2322 return -ENOMEM;
2323
2324 return 0;
2325}
2326
2327static int ath_tx_edma_init(struct ath_softc *sc)
2328{
2329 int err;
2330
2331 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2332 if (!err)
2333 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2334 sc->txsdma.dd_desc_paddr,
2335 ATH_TXSTATUS_RING_SIZE);
2336
2337 return err;
2338}
2339
2340static void ath_tx_edma_cleanup(struct ath_softc *sc)
2341{
2342 struct ath_descdma *dd = &sc->txsdma;
2343
2344 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2345 dd->dd_desc_paddr);
2346}
2347
2145int ath_tx_init(struct ath_softc *sc, int nbufs) 2348int ath_tx_init(struct ath_softc *sc, int nbufs)
2146{ 2349{
2147 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2350 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2150,7 +2353,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2150 spin_lock_init(&sc->tx.txbuflock); 2353 spin_lock_init(&sc->tx.txbuflock);
2151 2354
2152 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2355 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2153 "tx", nbufs, 1); 2356 "tx", nbufs, 1, 1);
2154 if (error != 0) { 2357 if (error != 0) {
2155 ath_print(common, ATH_DBG_FATAL, 2358 ath_print(common, ATH_DBG_FATAL,
2156 "Failed to allocate tx descriptors: %d\n", error); 2359 "Failed to allocate tx descriptors: %d\n", error);
@@ -2158,7 +2361,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2158 } 2361 }
2159 2362
2160 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2363 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2161 "beacon", ATH_BCBUF, 1); 2364 "beacon", ATH_BCBUF, 1, 1);
2162 if (error != 0) { 2365 if (error != 0) {
2163 ath_print(common, ATH_DBG_FATAL, 2366 ath_print(common, ATH_DBG_FATAL,
2164 "Failed to allocate beacon descriptors: %d\n", error); 2367 "Failed to allocate beacon descriptors: %d\n", error);
@@ -2167,6 +2370,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2167 2370
2168 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2371 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2169 2372
2373 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2374 error = ath_tx_edma_init(sc);
2375 if (error)
2376 goto err;
2377 }
2378
2170err: 2379err:
2171 if (error != 0) 2380 if (error != 0)
2172 ath_tx_cleanup(sc); 2381 ath_tx_cleanup(sc);
@@ -2181,6 +2390,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
2181 2390
2182 if (sc->tx.txdma.dd_desc_len != 0) 2391 if (sc->tx.txdma.dd_desc_len != 0)
2183 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); 2392 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2393
2394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2395 ath_tx_edma_cleanup(sc);
2184} 2396}
2185 2397
2186void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2398void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 997303bcf4ae..7965b70efbab 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4571,6 +4571,23 @@ static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
4571 mutex_unlock(&wl->mutex); 4571 mutex_unlock(&wl->mutex);
4572} 4572}
4573 4573
4574static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
4575 struct survey_info *survey)
4576{
4577 struct b43_wl *wl = hw_to_b43_wl(hw);
4578 struct b43_wldev *dev = wl->current_dev;
4579 struct ieee80211_conf *conf = &hw->conf;
4580
4581 if (idx != 0)
4582 return -ENOENT;
4583
4584 survey->channel = conf->channel;
4585 survey->filled = SURVEY_INFO_NOISE_DBM;
4586 survey->noise = dev->stats.link_noise;
4587
4588 return 0;
4589}
4590
4574static const struct ieee80211_ops b43_hw_ops = { 4591static const struct ieee80211_ops b43_hw_ops = {
4575 .tx = b43_op_tx, 4592 .tx = b43_op_tx,
4576 .conf_tx = b43_op_conf_tx, 4593 .conf_tx = b43_op_conf_tx,
@@ -4590,6 +4607,7 @@ static const struct ieee80211_ops b43_hw_ops = {
4590 .sta_notify = b43_op_sta_notify, 4607 .sta_notify = b43_op_sta_notify,
4591 .sw_scan_start = b43_op_sw_scan_start_notifier, 4608 .sw_scan_start = b43_op_sw_scan_start_notifier,
4592 .sw_scan_complete = b43_op_sw_scan_complete_notifier, 4609 .sw_scan_complete = b43_op_sw_scan_complete_notifier,
4610 .get_survey = b43_op_get_survey,
4593 .rfkill_poll = b43_rfkill_poll, 4611 .rfkill_poll = b43_rfkill_poll,
4594}; 4612};
4595 4613
@@ -4905,8 +4923,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4905 4923
4906 /* fill hw info */ 4924 /* fill hw info */
4907 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 4925 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
4908 IEEE80211_HW_SIGNAL_DBM | 4926 IEEE80211_HW_SIGNAL_DBM;
4909 IEEE80211_HW_NOISE_DBM;
4910 4927
4911 hw->wiphy->interface_modes = 4928 hw->wiphy->interface_modes =
4912 BIT(NL80211_IFTYPE_AP) | 4929 BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index eda06529ef5f..e6b0528f3b52 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -610,7 +610,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
610 } 610 }
611 611
612 /* Link quality statistics */ 612 /* Link quality statistics */
613 status.noise = dev->stats.link_noise;
614 if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) { 613 if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
615// s8 rssi = max(rxhdr->power0, rxhdr->power1); 614// s8 rssi = max(rxhdr->power0, rxhdr->power1);
616 //TODO: Find out what the rssi value is (dBm or percentage?) 615 //TODO: Find out what the rssi value is (dBm or percentage?)
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index bb2dd9329aa0..1713f5f7a58b 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3482,6 +3482,23 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3482 return 0; 3482 return 0;
3483} 3483}
3484 3484
3485static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
3486 struct survey_info *survey)
3487{
3488 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3489 struct b43legacy_wldev *dev = wl->current_dev;
3490 struct ieee80211_conf *conf = &hw->conf;
3491
3492 if (idx != 0)
3493 return -ENOENT;
3494
3495 survey->channel = conf->channel;
3496 survey->filled = SURVEY_INFO_NOISE_DBM;
3497 survey->noise = dev->stats.link_noise;
3498
3499 return 0;
3500}
3501
3485static const struct ieee80211_ops b43legacy_hw_ops = { 3502static const struct ieee80211_ops b43legacy_hw_ops = {
3486 .tx = b43legacy_op_tx, 3503 .tx = b43legacy_op_tx,
3487 .conf_tx = b43legacy_op_conf_tx, 3504 .conf_tx = b43legacy_op_conf_tx,
@@ -3494,6 +3511,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
3494 .start = b43legacy_op_start, 3511 .start = b43legacy_op_start,
3495 .stop = b43legacy_op_stop, 3512 .stop = b43legacy_op_stop,
3496 .set_tim = b43legacy_op_beacon_set_tim, 3513 .set_tim = b43legacy_op_beacon_set_tim,
3514 .get_survey = b43legacy_op_get_survey,
3497 .rfkill_poll = b43legacy_rfkill_poll, 3515 .rfkill_poll = b43legacy_rfkill_poll,
3498}; 3516};
3499 3517
@@ -3769,8 +3787,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3769 3787
3770 /* fill hw info */ 3788 /* fill hw info */
3771 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 3789 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3772 IEEE80211_HW_SIGNAL_DBM | 3790 IEEE80211_HW_SIGNAL_DBM;
3773 IEEE80211_HW_NOISE_DBM;
3774 hw->wiphy->interface_modes = 3791 hw->wiphy->interface_modes =
3775 BIT(NL80211_IFTYPE_AP) | 3792 BIT(NL80211_IFTYPE_AP) |
3776 BIT(NL80211_IFTYPE_STATION) | 3793 BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 9c8882d9275e..7d177d97f1f7 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -548,7 +548,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
548 (phystat0 & B43legacy_RX_PHYST0_OFDM), 548 (phystat0 & B43legacy_RX_PHYST0_OFDM),
549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
551 status.noise = dev->stats.link_noise;
552 /* change to support A PHY */ 551 /* change to support A PHY */
553 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 552 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
554 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 553 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index a684a72eb6e9..7c7235385513 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o 14iwlagn-objs += iwl-agn-lib.o
15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
15 16
16iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
17iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 18iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
@@ -21,5 +22,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
21# 3945 22# 3945
22obj-$(CONFIG_IWL3945) += iwl3945.o 23obj-$(CONFIG_IWL3945) += iwl3945.o
23iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 24iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
25iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
24 26
25ccflags-y += -D__CHECK_ENDIAN__ 27ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 9a0191a5ea35..fb59af2d41c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -46,6 +46,7 @@
46#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h" 47#include "iwl-agn-hw.h"
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn-debugfs.h"
49 50
50/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
51#define IWL1000_UCODE_API_MAX 3 52#define IWL1000_UCODE_API_MAX 3
@@ -212,6 +213,11 @@ static struct iwl_lib_ops iwl1000_lib = {
212 .set_ct_kill = iwl1000_set_ct_threshold, 213 .set_ct_kill = iwl1000_set_ct_threshold,
213 }, 214 },
214 .add_bcast_station = iwl_add_bcast_station, 215 .add_bcast_station = iwl_add_bcast_station,
216 .debugfs_ops = {
217 .rx_stats_read = iwl_ucode_rx_stats_read,
218 .tx_stats_read = iwl_ucode_tx_stats_read,
219 .general_stats_read = iwl_ucode_general_stats_read,
220 },
215 .recover_from_tx_stall = iwl_bg_monitor_recover, 221 .recover_from_tx_stall = iwl_bg_monitor_recover,
216 .check_plcp_health = iwl_good_plcp_health, 222 .check_plcp_health = iwl_good_plcp_health,
217 .check_ack_health = iwl_good_ack_health, 223 .check_ack_health = iwl_good_ack_health,
@@ -276,7 +282,6 @@ struct iwl_cfg iwl1000_bg_cfg = {
276 .use_bsm = false, 282 .use_bsm = false,
277 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 283 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
278 .shadow_ram_support = false, 284 .shadow_ram_support = false,
279 .ht_greenfield_support = true,
280 .led_compensation = 51, 285 .led_compensation = 51,
281 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 286 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
282 .support_ct_kill_exit = true, 287 .support_ct_kill_exit = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
new file mode 100644
index 000000000000..6a9c64a50e36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -0,0 +1,500 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
32 char __user *user_buf,
33 size_t count, loff_t *ppos)
34{
35 struct iwl_priv *priv = file->private_data;
36 int pos = 0;
37 char *buf;
38 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
39 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
40 ssize_t ret;
41 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
42 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
43 struct iwl39_statistics_rx_non_phy *general, *accum_general;
44 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
45
46 if (!iwl_is_alive(priv))
47 return -EAGAIN;
48
49 buf = kzalloc(bufsz, GFP_KERNEL);
50 if (!buf) {
51 IWL_ERR(priv, "Can not allocate Buffer\n");
52 return -ENOMEM;
53 }
54
55 /*
56 * The statistic information display here is based on
57 * the last statistics notification from uCode
58 * might not reflect the current uCode activity
59 */
60 ofdm = &priv->_3945.statistics.rx.ofdm;
61 cck = &priv->_3945.statistics.rx.cck;
62 general = &priv->_3945.statistics.rx.general;
63 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
64 accum_cck = &priv->_3945.accum_statistics.rx.cck;
65 accum_general = &priv->_3945.accum_statistics.rx.general;
66 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
67 delta_cck = &priv->_3945.delta_statistics.rx.cck;
68 delta_general = &priv->_3945.delta_statistics.rx.general;
69 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
70 max_cck = &priv->_3945.max_delta.rx.cck;
71 max_general = &priv->_3945.max_delta.rx.general;
72
73 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
74 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
75 "acumulative delta max\n",
76 "Statistics_Rx - OFDM:");
77 pos += scnprintf(buf + pos, bufsz - pos,
78 " %-30s %10u %10u %10u %10u\n",
79 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
80 accum_ofdm->ina_cnt,
81 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
82 pos += scnprintf(buf + pos, bufsz - pos,
83 " %-30s %10u %10u %10u %10u\n",
84 "fina_cnt:",
85 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
86 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
87 pos += scnprintf(buf + pos, bufsz - pos,
88 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
89 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
90 delta_ofdm->plcp_err, max_ofdm->plcp_err);
91 pos += scnprintf(buf + pos, bufsz - pos,
92 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
93 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
94 delta_ofdm->crc32_err, max_ofdm->crc32_err);
95 pos += scnprintf(buf + pos, bufsz - pos,
96 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
97 le32_to_cpu(ofdm->overrun_err),
98 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
99 max_ofdm->overrun_err);
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "early_overrun_err:",
103 le32_to_cpu(ofdm->early_overrun_err),
104 accum_ofdm->early_overrun_err,
105 delta_ofdm->early_overrun_err,
106 max_ofdm->early_overrun_err);
107 pos += scnprintf(buf + pos, bufsz - pos,
108 " %-30s %10u %10u %10u %10u\n",
109 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
110 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
111 max_ofdm->crc32_good);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
114 le32_to_cpu(ofdm->false_alarm_cnt),
115 accum_ofdm->false_alarm_cnt,
116 delta_ofdm->false_alarm_cnt,
117 max_ofdm->false_alarm_cnt);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n",
120 "fina_sync_err_cnt:",
121 le32_to_cpu(ofdm->fina_sync_err_cnt),
122 accum_ofdm->fina_sync_err_cnt,
123 delta_ofdm->fina_sync_err_cnt,
124 max_ofdm->fina_sync_err_cnt);
125 pos += scnprintf(buf + pos, bufsz - pos,
126 " %-30s %10u %10u %10u %10u\n",
127 "sfd_timeout:",
128 le32_to_cpu(ofdm->sfd_timeout),
129 accum_ofdm->sfd_timeout,
130 delta_ofdm->sfd_timeout,
131 max_ofdm->sfd_timeout);
132 pos += scnprintf(buf + pos, bufsz - pos,
133 " %-30s %10u %10u %10u %10u\n",
134 "fina_timeout:",
135 le32_to_cpu(ofdm->fina_timeout),
136 accum_ofdm->fina_timeout,
137 delta_ofdm->fina_timeout,
138 max_ofdm->fina_timeout);
139 pos += scnprintf(buf + pos, bufsz - pos,
140 " %-30s %10u %10u %10u %10u\n",
141 "unresponded_rts:",
142 le32_to_cpu(ofdm->unresponded_rts),
143 accum_ofdm->unresponded_rts,
144 delta_ofdm->unresponded_rts,
145 max_ofdm->unresponded_rts);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 " %-30s %10u %10u %10u %10u\n",
148 "rxe_frame_lmt_ovrun:",
149 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
150 accum_ofdm->rxe_frame_limit_overrun,
151 delta_ofdm->rxe_frame_limit_overrun,
152 max_ofdm->rxe_frame_limit_overrun);
153 pos += scnprintf(buf + pos, bufsz - pos,
154 " %-30s %10u %10u %10u %10u\n",
155 "sent_ack_cnt:",
156 le32_to_cpu(ofdm->sent_ack_cnt),
157 accum_ofdm->sent_ack_cnt,
158 delta_ofdm->sent_ack_cnt,
159 max_ofdm->sent_ack_cnt);
160 pos += scnprintf(buf + pos, bufsz - pos,
161 " %-30s %10u %10u %10u %10u\n",
162 "sent_cts_cnt:",
163 le32_to_cpu(ofdm->sent_cts_cnt),
164 accum_ofdm->sent_cts_cnt,
165 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
166
167 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
168 "acumulative delta max\n",
169 "Statistics_Rx - CCK:");
170 pos += scnprintf(buf + pos, bufsz - pos,
171 " %-30s %10u %10u %10u %10u\n",
172 "ina_cnt:",
173 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
174 delta_cck->ina_cnt, max_cck->ina_cnt);
175 pos += scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n",
177 "fina_cnt:",
178 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
179 delta_cck->fina_cnt, max_cck->fina_cnt);
180 pos += scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n",
182 "plcp_err:",
183 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
184 delta_cck->plcp_err, max_cck->plcp_err);
185 pos += scnprintf(buf + pos, bufsz - pos,
186 " %-30s %10u %10u %10u %10u\n",
187 "crc32_err:",
188 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
189 delta_cck->crc32_err, max_cck->crc32_err);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 " %-30s %10u %10u %10u %10u\n",
192 "overrun_err:",
193 le32_to_cpu(cck->overrun_err),
194 accum_cck->overrun_err,
195 delta_cck->overrun_err, max_cck->overrun_err);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n",
198 "early_overrun_err:",
199 le32_to_cpu(cck->early_overrun_err),
200 accum_cck->early_overrun_err,
201 delta_cck->early_overrun_err,
202 max_cck->early_overrun_err);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "crc32_good:",
206 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
207 delta_cck->crc32_good,
208 max_cck->crc32_good);
209 pos += scnprintf(buf + pos, bufsz - pos,
210 " %-30s %10u %10u %10u %10u\n",
211 "false_alarm_cnt:",
212 le32_to_cpu(cck->false_alarm_cnt),
213 accum_cck->false_alarm_cnt,
214 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 " %-30s %10u %10u %10u %10u\n",
217 "fina_sync_err_cnt:",
218 le32_to_cpu(cck->fina_sync_err_cnt),
219 accum_cck->fina_sync_err_cnt,
220 delta_cck->fina_sync_err_cnt,
221 max_cck->fina_sync_err_cnt);
222 pos += scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n",
224 "sfd_timeout:",
225 le32_to_cpu(cck->sfd_timeout),
226 accum_cck->sfd_timeout,
227 delta_cck->sfd_timeout, max_cck->sfd_timeout);
228 pos += scnprintf(buf + pos, bufsz - pos,
229 " %-30s %10u %10u %10u %10u\n",
230 "fina_timeout:",
231 le32_to_cpu(cck->fina_timeout),
232 accum_cck->fina_timeout,
233 delta_cck->fina_timeout, max_cck->fina_timeout);
234 pos += scnprintf(buf + pos, bufsz - pos,
235 " %-30s %10u %10u %10u %10u\n",
236 "unresponded_rts:",
237 le32_to_cpu(cck->unresponded_rts),
238 accum_cck->unresponded_rts,
239 delta_cck->unresponded_rts,
240 max_cck->unresponded_rts);
241 pos += scnprintf(buf + pos, bufsz - pos,
242 " %-30s %10u %10u %10u %10u\n",
243 "rxe_frame_lmt_ovrun:",
244 le32_to_cpu(cck->rxe_frame_limit_overrun),
245 accum_cck->rxe_frame_limit_overrun,
246 delta_cck->rxe_frame_limit_overrun,
247 max_cck->rxe_frame_limit_overrun);
248 pos += scnprintf(buf + pos, bufsz - pos,
249 " %-30s %10u %10u %10u %10u\n",
250 "sent_ack_cnt:",
251 le32_to_cpu(cck->sent_ack_cnt),
252 accum_cck->sent_ack_cnt,
253 delta_cck->sent_ack_cnt,
254 max_cck->sent_ack_cnt);
255 pos += scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "sent_cts_cnt:",
258 le32_to_cpu(cck->sent_cts_cnt),
259 accum_cck->sent_cts_cnt,
260 delta_cck->sent_cts_cnt,
261 max_cck->sent_cts_cnt);
262
263 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
264 "acumulative delta max\n",
265 "Statistics_Rx - GENERAL:");
266 pos += scnprintf(buf + pos, bufsz - pos,
267 " %-30s %10u %10u %10u %10u\n",
268 "bogus_cts:",
269 le32_to_cpu(general->bogus_cts),
270 accum_general->bogus_cts,
271 delta_general->bogus_cts, max_general->bogus_cts);
272 pos += scnprintf(buf + pos, bufsz - pos,
273 " %-30s %10u %10u %10u %10u\n",
274 "bogus_ack:",
275 le32_to_cpu(general->bogus_ack),
276 accum_general->bogus_ack,
277 delta_general->bogus_ack, max_general->bogus_ack);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "non_bssid_frames:",
281 le32_to_cpu(general->non_bssid_frames),
282 accum_general->non_bssid_frames,
283 delta_general->non_bssid_frames,
284 max_general->non_bssid_frames);
285 pos += scnprintf(buf + pos, bufsz - pos,
286 " %-30s %10u %10u %10u %10u\n",
287 "filtered_frames:",
288 le32_to_cpu(general->filtered_frames),
289 accum_general->filtered_frames,
290 delta_general->filtered_frames,
291 max_general->filtered_frames);
292 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n",
294 "non_channel_beacons:",
295 le32_to_cpu(general->non_channel_beacons),
296 accum_general->non_channel_beacons,
297 delta_general->non_channel_beacons,
298 max_general->non_channel_beacons);
299
300 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
301 kfree(buf);
302 return ret;
303}
304
305ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
306 char __user *user_buf,
307 size_t count, loff_t *ppos)
308{
309 struct iwl_priv *priv = file->private_data;
310 int pos = 0;
311 char *buf;
312 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
313 ssize_t ret;
314 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
315
316 if (!iwl_is_alive(priv))
317 return -EAGAIN;
318
319 buf = kzalloc(bufsz, GFP_KERNEL);
320 if (!buf) {
321 IWL_ERR(priv, "Can not allocate Buffer\n");
322 return -ENOMEM;
323 }
324
325 /*
326 * The statistic information display here is based on
327 * the last statistics notification from uCode
328 * might not reflect the current uCode activity
329 */
330 tx = &priv->_3945.statistics.tx;
331 accum_tx = &priv->_3945.accum_statistics.tx;
332 delta_tx = &priv->_3945.delta_statistics.tx;
333 max_tx = &priv->_3945.max_delta.tx;
334 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
335 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
336 "acumulative delta max\n",
337 "Statistics_Tx:");
338 pos += scnprintf(buf + pos, bufsz - pos,
339 " %-30s %10u %10u %10u %10u\n",
340 "preamble:",
341 le32_to_cpu(tx->preamble_cnt),
342 accum_tx->preamble_cnt,
343 delta_tx->preamble_cnt, max_tx->preamble_cnt);
344 pos += scnprintf(buf + pos, bufsz - pos,
345 " %-30s %10u %10u %10u %10u\n",
346 "rx_detected_cnt:",
347 le32_to_cpu(tx->rx_detected_cnt),
348 accum_tx->rx_detected_cnt,
349 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
350 pos += scnprintf(buf + pos, bufsz - pos,
351 " %-30s %10u %10u %10u %10u\n",
352 "bt_prio_defer_cnt:",
353 le32_to_cpu(tx->bt_prio_defer_cnt),
354 accum_tx->bt_prio_defer_cnt,
355 delta_tx->bt_prio_defer_cnt,
356 max_tx->bt_prio_defer_cnt);
357 pos += scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n",
359 "bt_prio_kill_cnt:",
360 le32_to_cpu(tx->bt_prio_kill_cnt),
361 accum_tx->bt_prio_kill_cnt,
362 delta_tx->bt_prio_kill_cnt,
363 max_tx->bt_prio_kill_cnt);
364 pos += scnprintf(buf + pos, bufsz - pos,
365 " %-30s %10u %10u %10u %10u\n",
366 "few_bytes_cnt:",
367 le32_to_cpu(tx->few_bytes_cnt),
368 accum_tx->few_bytes_cnt,
369 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
370 pos += scnprintf(buf + pos, bufsz - pos,
371 " %-30s %10u %10u %10u %10u\n",
372 "cts_timeout:",
373 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
374 delta_tx->cts_timeout, max_tx->cts_timeout);
375 pos += scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n",
377 "ack_timeout:",
378 le32_to_cpu(tx->ack_timeout),
379 accum_tx->ack_timeout,
380 delta_tx->ack_timeout, max_tx->ack_timeout);
381 pos += scnprintf(buf + pos, bufsz - pos,
382 " %-30s %10u %10u %10u %10u\n",
383 "expected_ack_cnt:",
384 le32_to_cpu(tx->expected_ack_cnt),
385 accum_tx->expected_ack_cnt,
386 delta_tx->expected_ack_cnt,
387 max_tx->expected_ack_cnt);
388 pos += scnprintf(buf + pos, bufsz - pos,
389 " %-30s %10u %10u %10u %10u\n",
390 "actual_ack_cnt:",
391 le32_to_cpu(tx->actual_ack_cnt),
392 accum_tx->actual_ack_cnt,
393 delta_tx->actual_ack_cnt,
394 max_tx->actual_ack_cnt);
395
396 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
397 kfree(buf);
398 return ret;
399}
400
401ssize_t iwl3945_ucode_general_stats_read(struct file *file,
402 char __user *user_buf,
403 size_t count, loff_t *ppos)
404{
405 struct iwl_priv *priv = file->private_data;
406 int pos = 0;
407 char *buf;
408 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
409 ssize_t ret;
410 struct iwl39_statistics_general *general, *accum_general;
411 struct iwl39_statistics_general *delta_general, *max_general;
412 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
413 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
414
415 if (!iwl_is_alive(priv))
416 return -EAGAIN;
417
418 buf = kzalloc(bufsz, GFP_KERNEL);
419 if (!buf) {
420 IWL_ERR(priv, "Can not allocate Buffer\n");
421 return -ENOMEM;
422 }
423
424 /*
425 * The statistic information display here is based on
426 * the last statistics notification from uCode
427 * might not reflect the current uCode activity
428 */
429 general = &priv->_3945.statistics.general;
430 dbg = &priv->_3945.statistics.general.dbg;
431 div = &priv->_3945.statistics.general.div;
432 accum_general = &priv->_3945.accum_statistics.general;
433 delta_general = &priv->_3945.delta_statistics.general;
434 max_general = &priv->_3945.max_delta.general;
435 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
436 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
437 max_dbg = &priv->_3945.max_delta.general.dbg;
438 accum_div = &priv->_3945.accum_statistics.general.div;
439 delta_div = &priv->_3945.delta_statistics.general.div;
440 max_div = &priv->_3945.max_delta.general.div;
441 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
442 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
443 "acumulative delta max\n",
444 "Statistics_General:");
445 pos += scnprintf(buf + pos, bufsz - pos,
446 " %-30s %10u %10u %10u %10u\n",
447 "burst_check:",
448 le32_to_cpu(dbg->burst_check),
449 accum_dbg->burst_check,
450 delta_dbg->burst_check, max_dbg->burst_check);
451 pos += scnprintf(buf + pos, bufsz - pos,
452 " %-30s %10u %10u %10u %10u\n",
453 "burst_count:",
454 le32_to_cpu(dbg->burst_count),
455 accum_dbg->burst_count,
456 delta_dbg->burst_count, max_dbg->burst_count);
457 pos += scnprintf(buf + pos, bufsz - pos,
458 " %-30s %10u %10u %10u %10u\n",
459 "sleep_time:",
460 le32_to_cpu(general->sleep_time),
461 accum_general->sleep_time,
462 delta_general->sleep_time, max_general->sleep_time);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n",
465 "slots_out:",
466 le32_to_cpu(general->slots_out),
467 accum_general->slots_out,
468 delta_general->slots_out, max_general->slots_out);
469 pos += scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n",
471 "slots_idle:",
472 le32_to_cpu(general->slots_idle),
473 accum_general->slots_idle,
474 delta_general->slots_idle, max_general->slots_idle);
475 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
476 le32_to_cpu(general->ttl_timestamp));
477 pos += scnprintf(buf + pos, bufsz - pos,
478 " %-30s %10u %10u %10u %10u\n",
479 "tx_on_a:",
480 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
481 delta_div->tx_on_a, max_div->tx_on_a);
482 pos += scnprintf(buf + pos, bufsz - pos,
483 " %-30s %10u %10u %10u %10u\n",
484 "tx_on_b:",
485 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
486 delta_div->tx_on_b, max_div->tx_on_b);
487 pos += scnprintf(buf + pos, bufsz - pos,
488 " %-30s %10u %10u %10u %10u\n",
489 "exec_time:",
490 le32_to_cpu(div->exec_time), accum_div->exec_time,
491 delta_div->exec_time, max_div->exec_time);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "probe_time:",
495 le32_to_cpu(div->probe_time), accum_div->probe_time,
496 delta_div->probe_time, max_div->probe_time);
497 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
498 kfree(buf);
499 return ret;
500}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
new file mode 100644
index 000000000000..70809c53c215
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index bde3b4cbab9d..17197a78d894 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -50,6 +50,7 @@
50#include "iwl-helpers.h" 50#include "iwl-helpers.h"
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h"
53 54
54#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
55 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293 * iwl3945_rx_reply_tx - Handle Tx response 294 * iwl3945_rx_reply_tx - Handle Tx response
294 */ 295 */
295static void iwl3945_rx_reply_tx(struct iwl_priv *priv, 296static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
296 struct iwl_rx_mem_buffer *rxb) 297 struct iwl_rx_mem_buffer *rxb)
297{ 298{
298 struct iwl_rx_packet *pkt = rxb_addr(rxb); 299 struct iwl_rx_packet *pkt = rxb_addr(rxb);
299 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 300 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -351,18 +352,81 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
351 * RX handler implementations 352 * RX handler implementations
352 * 353 *
353 *****************************************************************************/ 354 *****************************************************************************/
355#ifdef CONFIG_IWLWIFI_DEBUG
356/*
357 * based on the assumption of all statistics counter are in DWORD
358 * FIXME: This function is for debugging, do not deal with
359 * the case of counters roll-over.
360 */
361static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
362 __le32 *stats)
363{
364 int i;
365 __le32 *prev_stats;
366 u32 *accum_stats;
367 u32 *delta, *max_delta;
368
369 prev_stats = (__le32 *)&priv->_3945.statistics;
370 accum_stats = (u32 *)&priv->_3945.accum_statistics;
371 delta = (u32 *)&priv->_3945.delta_statistics;
372 max_delta = (u32 *)&priv->_3945.max_delta;
373
374 for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
375 i += sizeof(__le32), stats++, prev_stats++, delta++,
376 max_delta++, accum_stats++) {
377 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
378 *delta = (le32_to_cpu(*stats) -
379 le32_to_cpu(*prev_stats));
380 *accum_stats += *delta;
381 if (*delta > *max_delta)
382 *max_delta = *delta;
383 }
384 }
385
386 /* reset accumulative statistics for "no-counter" type statistics */
387 priv->_3945.accum_statistics.general.temperature =
388 priv->_3945.statistics.general.temperature;
389 priv->_3945.accum_statistics.general.ttl_timestamp =
390 priv->_3945.statistics.general.ttl_timestamp;
391}
392#endif
354 393
355void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 394void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
356 struct iwl_rx_mem_buffer *rxb) 395 struct iwl_rx_mem_buffer *rxb)
357{ 396{
358 struct iwl_rx_packet *pkt = rxb_addr(rxb); 397 struct iwl_rx_packet *pkt = rxb_addr(rxb);
398
359 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 399 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
360 (int)sizeof(struct iwl3945_notif_statistics), 400 (int)sizeof(struct iwl3945_notif_statistics),
361 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 401 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
402#ifdef CONFIG_IWLWIFI_DEBUG
403 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
404#endif
362 405
363 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); 406 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
364} 407}
365 408
409void iwl3945_reply_statistics(struct iwl_priv *priv,
410 struct iwl_rx_mem_buffer *rxb)
411{
412 struct iwl_rx_packet *pkt = rxb_addr(rxb);
413 __le32 *flag = (__le32 *)&pkt->u.raw;
414
415 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
416#ifdef CONFIG_IWLWIFI_DEBUG
417 memset(&priv->_3945.accum_statistics, 0,
418 sizeof(struct iwl3945_notif_statistics));
419 memset(&priv->_3945.delta_statistics, 0,
420 sizeof(struct iwl3945_notif_statistics));
421 memset(&priv->_3945.max_delta, 0,
422 sizeof(struct iwl3945_notif_statistics));
423#endif
424 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
425 }
426 iwl3945_hw_rx_statistics(priv, rxb);
427}
428
429
366/****************************************************************************** 430/******************************************************************************
367 * 431 *
368 * Misc. internal state and helper functions 432 * Misc. internal state and helper functions
@@ -2688,6 +2752,7 @@ IWL3945_UCODE_GET(boot_size);
2688static struct iwl_hcmd_ops iwl3945_hcmd = { 2752static struct iwl_hcmd_ops iwl3945_hcmd = {
2689 .rxon_assoc = iwl3945_send_rxon_assoc, 2753 .rxon_assoc = iwl3945_send_rxon_assoc,
2690 .commit_rxon = iwl3945_commit_rxon, 2754 .commit_rxon = iwl3945_commit_rxon,
2755 .send_bt_config = iwl_send_bt_config,
2691}; 2756};
2692 2757
2693static struct iwl_ucode_ops iwl3945_ucode = { 2758static struct iwl_ucode_ops iwl3945_ucode = {
@@ -2735,12 +2800,19 @@ static struct iwl_lib_ops iwl3945_lib = {
2735 .isr = iwl_isr_legacy, 2800 .isr = iwl_isr_legacy,
2736 .config_ap = iwl3945_config_ap, 2801 .config_ap = iwl3945_config_ap,
2737 .add_bcast_station = iwl3945_add_bcast_station, 2802 .add_bcast_station = iwl3945_add_bcast_station,
2803
2804 .debugfs_ops = {
2805 .rx_stats_read = iwl3945_ucode_rx_stats_read,
2806 .tx_stats_read = iwl3945_ucode_tx_stats_read,
2807 .general_stats_read = iwl3945_ucode_general_stats_read,
2808 },
2738}; 2809};
2739 2810
2740static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2811static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2741 .get_hcmd_size = iwl3945_get_hcmd_size, 2812 .get_hcmd_size = iwl3945_get_hcmd_size,
2742 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2813 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2743 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2814 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2815 .request_scan = iwl3945_request_scan,
2744}; 2816};
2745 2817
2746static const struct iwl_ops iwl3945_ops = { 2818static const struct iwl_ops iwl3945_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index b89219573b91..643adb644bb8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -264,6 +264,8 @@ extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
264extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); 264extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
265extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 265extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
266 struct iwl_rx_mem_buffer *rxb); 266 struct iwl_rx_mem_buffer *rxb);
267void iwl3945_reply_statistics(struct iwl_priv *priv,
268 struct iwl_rx_mem_buffer *rxb);
267extern void iwl3945_disable_events(struct iwl_priv *priv); 269extern void iwl3945_disable_events(struct iwl_priv *priv);
268extern int iwl4965_get_temperature(const struct iwl_priv *priv); 270extern int iwl4965_get_temperature(const struct iwl_priv *priv);
269extern void iwl3945_post_associate(struct iwl_priv *priv); 271extern void iwl3945_post_associate(struct iwl_priv *priv);
@@ -294,6 +296,9 @@ extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 296
295extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); 297extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
296 298
299/* scanning */
300void iwl3945_request_scan(struct iwl_priv *priv);
301
297/* Requires full declaration of iwl_priv before including */ 302/* Requires full declaration of iwl_priv before including */
298#include "iwl-io.h" 303#include "iwl-io.h"
299 304
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 2e3cda75f3ad..136c29067489 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -47,6 +47,7 @@
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h" 49#include "iwl-agn.h"
50#include "iwl-agn-debugfs.h"
50 51
51static int iwl4965_send_tx_power(struct iwl_priv *priv); 52static int iwl4965_send_tx_power(struct iwl_priv *priv);
52static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -2143,6 +2144,7 @@ static struct iwl_hcmd_ops iwl4965_hcmd = {
2143 .rxon_assoc = iwl4965_send_rxon_assoc, 2144 .rxon_assoc = iwl4965_send_rxon_assoc,
2144 .commit_rxon = iwl_commit_rxon, 2145 .commit_rxon = iwl_commit_rxon,
2145 .set_rxon_chain = iwl_set_rxon_chain, 2146 .set_rxon_chain = iwl_set_rxon_chain,
2147 .send_bt_config = iwl_send_bt_config,
2146}; 2148};
2147 2149
2148static struct iwl_ucode_ops iwl4965_ucode = { 2150static struct iwl_ucode_ops iwl4965_ucode = {
@@ -2162,6 +2164,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2162 .gain_computation = iwl4965_gain_computation, 2164 .gain_computation = iwl4965_gain_computation,
2163 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2165 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2164 .calc_rssi = iwl4965_calc_rssi, 2166 .calc_rssi = iwl4965_calc_rssi,
2167 .request_scan = iwlagn_request_scan,
2165}; 2168};
2166 2169
2167static struct iwl_lib_ops iwl4965_lib = { 2170static struct iwl_lib_ops iwl4965_lib = {
@@ -2216,6 +2219,11 @@ static struct iwl_lib_ops iwl4965_lib = {
2216 .set_ct_kill = iwl4965_set_ct_threshold, 2219 .set_ct_kill = iwl4965_set_ct_threshold,
2217 }, 2220 },
2218 .add_bcast_station = iwl_add_bcast_station, 2221 .add_bcast_station = iwl_add_bcast_station,
2222 .debugfs_ops = {
2223 .rx_stats_read = iwl_ucode_rx_stats_read,
2224 .tx_stats_read = iwl_ucode_tx_stats_read,
2225 .general_stats_read = iwl_ucode_general_stats_read,
2226 },
2219 .check_plcp_health = iwl_good_plcp_health, 2227 .check_plcp_health = iwl_good_plcp_health,
2220}; 2228};
2221 2229
@@ -2253,8 +2261,13 @@ struct iwl_cfg iwl4965_agn_cfg = {
2253 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2261 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2254 .monitor_recover_period = IWL_MONITORING_PERIOD, 2262 .monitor_recover_period = IWL_MONITORING_PERIOD,
2255 .temperature_kelvin = true, 2263 .temperature_kelvin = true,
2256 .off_channel_workaround = true,
2257 .max_event_log_size = 512, 2264 .max_event_log_size = 512,
2265
2266 /*
2267 * Force use of chains B and C for scan RX on 5 GHz band
2268 * because the device has off-channel reception on chain A.
2269 */
2270 .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
2258}; 2271};
2259 2272
2260/* Module firmware */ 2273/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e967cfcac224..115d3ea1142f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -48,6 +48,7 @@
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn-hw.h" 49#include "iwl-agn-hw.h"
50#include "iwl-5000-hw.h" 50#include "iwl-5000-hw.h"
51#include "iwl-agn-debugfs.h"
51 52
52/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
53#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 2
@@ -199,26 +200,57 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
199 200
200 /* Set initial sensitivity parameters */ 201 /* Set initial sensitivity parameters */
201 /* Set initial calibration set */ 202 /* Set initial calibration set */
202 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 203 priv->hw_params.sens = &iwl5000_sensitivity;
203 case CSR_HW_REV_TYPE_5150: 204 priv->hw_params.calib_init_cfg =
204 priv->hw_params.sens = &iwl5150_sensitivity; 205 BIT(IWL_CALIB_XTAL) |
205 priv->hw_params.calib_init_cfg = 206 BIT(IWL_CALIB_LO) |
206 BIT(IWL_CALIB_DC) | 207 BIT(IWL_CALIB_TX_IQ) |
207 BIT(IWL_CALIB_LO) | 208 BIT(IWL_CALIB_TX_IQ_PERD) |
208 BIT(IWL_CALIB_TX_IQ) | 209 BIT(IWL_CALIB_BASE_BAND);
209 BIT(IWL_CALIB_BASE_BAND); 210
210 211 return 0;
211 break; 212}
212 default: 213
213 priv->hw_params.sens = &iwl5000_sensitivity; 214static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
214 priv->hw_params.calib_init_cfg = 215{
215 BIT(IWL_CALIB_XTAL) | 216 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
216 BIT(IWL_CALIB_LO) | 217 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
217 BIT(IWL_CALIB_TX_IQ) | 218 priv->cfg->num_of_queues =
218 BIT(IWL_CALIB_TX_IQ_PERD) | 219 priv->cfg->mod_params->num_of_queues;
219 BIT(IWL_CALIB_BASE_BAND); 220
220 break; 221 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
221 } 222 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
223 priv->hw_params.scd_bc_tbls_size =
224 priv->cfg->num_of_queues *
225 sizeof(struct iwlagn_scd_bc_tbl);
226 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
227 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
228 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
229
230 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
231 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
232
233 priv->hw_params.max_bsm_size = 0;
234 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
235 BIT(IEEE80211_BAND_5GHZ);
236 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
237
238 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
239 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
240 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
241 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
242
243 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
244 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
245
246 /* Set initial sensitivity parameters */
247 /* Set initial calibration set */
248 priv->hw_params.sens = &iwl5150_sensitivity;
249 priv->hw_params.calib_init_cfg =
250 BIT(IWL_CALIB_DC) |
251 BIT(IWL_CALIB_LO) |
252 BIT(IWL_CALIB_TX_IQ) |
253 BIT(IWL_CALIB_BASE_BAND);
222 254
223 return 0; 255 return 0;
224} 256}
@@ -320,13 +352,18 @@ static struct iwl_lib_ops iwl5000_lib = {
320 .set_ct_kill = iwl5000_set_ct_threshold, 352 .set_ct_kill = iwl5000_set_ct_threshold,
321 }, 353 },
322 .add_bcast_station = iwl_add_bcast_station, 354 .add_bcast_station = iwl_add_bcast_station,
355 .debugfs_ops = {
356 .rx_stats_read = iwl_ucode_rx_stats_read,
357 .tx_stats_read = iwl_ucode_tx_stats_read,
358 .general_stats_read = iwl_ucode_general_stats_read,
359 },
323 .recover_from_tx_stall = iwl_bg_monitor_recover, 360 .recover_from_tx_stall = iwl_bg_monitor_recover,
324 .check_plcp_health = iwl_good_plcp_health, 361 .check_plcp_health = iwl_good_plcp_health,
325 .check_ack_health = iwl_good_ack_health, 362 .check_ack_health = iwl_good_ack_health,
326}; 363};
327 364
328static struct iwl_lib_ops iwl5150_lib = { 365static struct iwl_lib_ops iwl5150_lib = {
329 .set_hw_params = iwl5000_hw_set_hw_params, 366 .set_hw_params = iwl5150_hw_set_hw_params,
330 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, 367 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
331 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, 368 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
332 .txq_set_sched = iwlagn_txq_set_sched, 369 .txq_set_sched = iwlagn_txq_set_sched,
@@ -377,6 +414,11 @@ static struct iwl_lib_ops iwl5150_lib = {
377 .set_ct_kill = iwl5150_set_ct_threshold, 414 .set_ct_kill = iwl5150_set_ct_threshold,
378 }, 415 },
379 .add_bcast_station = iwl_add_bcast_station, 416 .add_bcast_station = iwl_add_bcast_station,
417 .debugfs_ops = {
418 .rx_stats_read = iwl_ucode_rx_stats_read,
419 .tx_stats_read = iwl_ucode_tx_stats_read,
420 .general_stats_read = iwl_ucode_general_stats_read,
421 },
380 .recover_from_tx_stall = iwl_bg_monitor_recover, 422 .recover_from_tx_stall = iwl_bg_monitor_recover,
381 .check_plcp_health = iwl_good_plcp_health, 423 .check_plcp_health = iwl_good_plcp_health,
382 .check_ack_health = iwl_good_ack_health, 424 .check_ack_health = iwl_good_ack_health,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index dd03384432f4..7acef703253a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -47,17 +47,19 @@
47#include "iwl-agn-hw.h" 47#include "iwl-agn-hw.h"
48#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
49#include "iwl-agn-led.h" 49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
50 51
51/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
52#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
53#define IWL6050_UCODE_API_MAX 4 54#define IWL6050_UCODE_API_MAX 4
55#define IWL6000G2_UCODE_API_MAX 4
54 56
55/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
56#define IWL6000_UCODE_API_MIN 4 58#define IWL6000_UCODE_API_MIN 4
57#define IWL6050_UCODE_API_MIN 4 59#define IWL6050_UCODE_API_MIN 4
60#define IWL6000G2_UCODE_API_MIN 4
58 61
59#define IWL6000_FW_PRE "iwlwifi-6000-" 62#define IWL6000_FW_PRE "iwlwifi-6000-"
60#define IWL6000_G2_FW_PRE "iwlwifi-6005-"
61#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
62#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api) 64#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)
63 65
@@ -65,6 +67,10 @@
65#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" 67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
66#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api) 68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
67 69
70#define IWL6000G2_FW_PRE "iwlwifi-6005-"
71#define _IWL6000G2_MODULE_FIRMWARE(api) IWL6000G2_FW_PRE #api ".ucode"
72#define IWL6000G2_MODULE_FIRMWARE(api) _IWL6000G2_MODULE_FIRMWARE(api)
73
68static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 74static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
69{ 75{
70 /* want Celsius */ 76 /* want Celsius */
@@ -170,24 +176,56 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
170 /* Set initial sensitivity parameters */ 176 /* Set initial sensitivity parameters */
171 /* Set initial calibration set */ 177 /* Set initial calibration set */
172 priv->hw_params.sens = &iwl6000_sensitivity; 178 priv->hw_params.sens = &iwl6000_sensitivity;
173 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 179 priv->hw_params.calib_init_cfg =
174 case CSR_HW_REV_TYPE_6x50: 180 BIT(IWL_CALIB_XTAL) |
175 priv->hw_params.calib_init_cfg = 181 BIT(IWL_CALIB_LO) |
176 BIT(IWL_CALIB_XTAL) | 182 BIT(IWL_CALIB_TX_IQ) |
177 BIT(IWL_CALIB_DC) | 183 BIT(IWL_CALIB_BASE_BAND);
178 BIT(IWL_CALIB_LO) | 184
179 BIT(IWL_CALIB_TX_IQ) | 185 return 0;
180 BIT(IWL_CALIB_BASE_BAND); 186}
181 187
182 break; 188static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
183 default: 189{
184 priv->hw_params.calib_init_cfg = 190 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
185 BIT(IWL_CALIB_XTAL) | 191 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
186 BIT(IWL_CALIB_LO) | 192 priv->cfg->num_of_queues =
187 BIT(IWL_CALIB_TX_IQ) | 193 priv->cfg->mod_params->num_of_queues;
188 BIT(IWL_CALIB_BASE_BAND); 194
189 break; 195 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
190 } 196 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
197 priv->hw_params.scd_bc_tbls_size =
198 priv->cfg->num_of_queues *
199 sizeof(struct iwlagn_scd_bc_tbl);
200 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
201 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
202 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
203
204 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
205 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
206
207 priv->hw_params.max_bsm_size = 0;
208 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
209 BIT(IEEE80211_BAND_5GHZ);
210 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
211
212 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
213 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
214 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
215 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
216
217 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
218 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
219
220 /* Set initial sensitivity parameters */
221 /* Set initial calibration set */
222 priv->hw_params.sens = &iwl6000_sensitivity;
223 priv->hw_params.calib_init_cfg =
224 BIT(IWL_CALIB_XTAL) |
225 BIT(IWL_CALIB_DC) |
226 BIT(IWL_CALIB_LO) |
227 BIT(IWL_CALIB_TX_IQ) |
228 BIT(IWL_CALIB_BASE_BAND);
191 229
192 return 0; 230 return 0;
193} 231}
@@ -261,7 +299,7 @@ static struct iwl_lib_ops iwl6000_lib = {
261 EEPROM_REG_BAND_3_CHANNELS, 299 EEPROM_REG_BAND_3_CHANNELS,
262 EEPROM_REG_BAND_4_CHANNELS, 300 EEPROM_REG_BAND_4_CHANNELS,
263 EEPROM_REG_BAND_5_CHANNELS, 301 EEPROM_REG_BAND_5_CHANNELS,
264 EEPROM_REG_BAND_24_HT40_CHANNELS, 302 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
265 EEPROM_REG_BAND_52_HT40_CHANNELS 303 EEPROM_REG_BAND_52_HT40_CHANNELS
266 }, 304 },
267 .verify_signature = iwlcore_eeprom_verify_signature, 305 .verify_signature = iwlcore_eeprom_verify_signature,
@@ -279,6 +317,11 @@ static struct iwl_lib_ops iwl6000_lib = {
279 .set_ct_kill = iwl6000_set_ct_threshold, 317 .set_ct_kill = iwl6000_set_ct_threshold,
280 }, 318 },
281 .add_bcast_station = iwl_add_bcast_station, 319 .add_bcast_station = iwl_add_bcast_station,
320 .debugfs_ops = {
321 .rx_stats_read = iwl_ucode_rx_stats_read,
322 .tx_stats_read = iwl_ucode_tx_stats_read,
323 .general_stats_read = iwl_ucode_general_stats_read,
324 },
282 .recover_from_tx_stall = iwl_bg_monitor_recover, 325 .recover_from_tx_stall = iwl_bg_monitor_recover,
283 .check_plcp_health = iwl_good_plcp_health, 326 .check_plcp_health = iwl_good_plcp_health,
284 .check_ack_health = iwl_good_ack_health, 327 .check_ack_health = iwl_good_ack_health,
@@ -293,7 +336,7 @@ static const struct iwl_ops iwl6000_ops = {
293}; 336};
294 337
295static struct iwl_lib_ops iwl6050_lib = { 338static struct iwl_lib_ops iwl6050_lib = {
296 .set_hw_params = iwl6000_hw_set_hw_params, 339 .set_hw_params = iwl6050_hw_set_hw_params,
297 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, 340 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
298 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, 341 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
299 .txq_set_sched = iwlagn_txq_set_sched, 342 .txq_set_sched = iwlagn_txq_set_sched,
@@ -328,7 +371,7 @@ static struct iwl_lib_ops iwl6050_lib = {
328 EEPROM_REG_BAND_3_CHANNELS, 371 EEPROM_REG_BAND_3_CHANNELS,
329 EEPROM_REG_BAND_4_CHANNELS, 372 EEPROM_REG_BAND_4_CHANNELS,
330 EEPROM_REG_BAND_5_CHANNELS, 373 EEPROM_REG_BAND_5_CHANNELS,
331 EEPROM_REG_BAND_24_HT40_CHANNELS, 374 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
332 EEPROM_REG_BAND_52_HT40_CHANNELS 375 EEPROM_REG_BAND_52_HT40_CHANNELS
333 }, 376 },
334 .verify_signature = iwlcore_eeprom_verify_signature, 377 .verify_signature = iwlcore_eeprom_verify_signature,
@@ -347,6 +390,11 @@ static struct iwl_lib_ops iwl6050_lib = {
347 .set_calib_version = iwl6050_set_calib_version, 390 .set_calib_version = iwl6050_set_calib_version,
348 }, 391 },
349 .add_bcast_station = iwl_add_bcast_station, 392 .add_bcast_station = iwl_add_bcast_station,
393 .debugfs_ops = {
394 .rx_stats_read = iwl_ucode_rx_stats_read,
395 .tx_stats_read = iwl_ucode_tx_stats_read,
396 .general_stats_read = iwl_ucode_general_stats_read,
397 },
350 .recover_from_tx_stall = iwl_bg_monitor_recover, 398 .recover_from_tx_stall = iwl_bg_monitor_recover,
351 .check_plcp_health = iwl_good_plcp_health, 399 .check_plcp_health = iwl_good_plcp_health,
352 .check_ack_health = iwl_good_ack_health, 400 .check_ack_health = iwl_good_ack_health,
@@ -363,16 +411,16 @@ static const struct iwl_ops iwl6050_ops = {
363/* 411/*
364 * "i": Internal configuration, use internal Power Amplifier 412 * "i": Internal configuration, use internal Power Amplifier
365 */ 413 */
366struct iwl_cfg iwl6000i_g2_2agn_cfg = { 414struct iwl_cfg iwl6000g2_2agn_cfg = {
367 .name = "6000 Series 2x2 AGN Gen2", 415 .name = "6000 Series 2x2 AGN Gen2",
368 .fw_name_pre = IWL6000_G2_FW_PRE, 416 .fw_name_pre = IWL6000G2_FW_PRE,
369 .ucode_api_max = IWL6000_UCODE_API_MAX, 417 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
370 .ucode_api_min = IWL6000_UCODE_API_MIN, 418 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
371 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 419 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
372 .ops = &iwl6000_ops, 420 .ops = &iwl6000_ops,
373 .eeprom_size = OTP_LOW_IMAGE_SIZE, 421 .eeprom_size = OTP_LOW_IMAGE_SIZE,
374 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 422 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
375 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 423 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
376 .num_of_queues = IWLAGN_NUM_QUEUES, 424 .num_of_queues = IWLAGN_NUM_QUEUES,
377 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 425 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
378 .mod_params = &iwlagn_mod_params, 426 .mod_params = &iwlagn_mod_params,
@@ -381,7 +429,7 @@ struct iwl_cfg iwl6000i_g2_2agn_cfg = {
381 .pll_cfg_val = 0, 429 .pll_cfg_val = 0,
382 .set_l0s = true, 430 .set_l0s = true,
383 .use_bsm = false, 431 .use_bsm = false,
384 .pa_type = IWL_PA_INTERNAL, 432 .pa_type = IWL_PA_SYSTEM,
385 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 433 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
386 .shadow_ram_support = true, 434 .shadow_ram_support = true,
387 .ht_greenfield_support = true, 435 .ht_greenfield_support = true,
@@ -452,7 +500,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
452 .pa_type = IWL_PA_INTERNAL, 500 .pa_type = IWL_PA_INTERNAL,
453 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 501 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
454 .shadow_ram_support = true, 502 .shadow_ram_support = true,
455 .ht_greenfield_support = true,
456 .led_compensation = 51, 503 .led_compensation = 51,
457 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 504 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
458 .supports_idle = true, 505 .supports_idle = true,
@@ -485,7 +532,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
485 .pa_type = IWL_PA_INTERNAL, 532 .pa_type = IWL_PA_INTERNAL,
486 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 533 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
487 .shadow_ram_support = true, 534 .shadow_ram_support = true,
488 .ht_greenfield_support = true,
489 .led_compensation = 51, 535 .led_compensation = 51,
490 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 536 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
491 .supports_idle = true, 537 .supports_idle = true,
@@ -552,7 +598,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
552 .pa_type = IWL_PA_SYSTEM, 598 .pa_type = IWL_PA_SYSTEM,
553 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 599 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
554 .shadow_ram_support = true, 600 .shadow_ram_support = true,
555 .ht_greenfield_support = true,
556 .led_compensation = 51, 601 .led_compensation = 51,
557 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
558 .supports_idle = true, 603 .supports_idle = true,
@@ -600,3 +645,4 @@ struct iwl_cfg iwl6000_3agn_cfg = {
600 645
601MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 646MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
602MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 647MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
648MODULE_FIRMWARE(IWL6000G2_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
new file mode 100644
index 000000000000..f249b706bf17
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -0,0 +1,834 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28
29#include "iwl-agn-debugfs.h"
30
31ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
32 size_t count, loff_t *ppos)
33 {
34 struct iwl_priv *priv = file->private_data;
35 int pos = 0;
36 char *buf;
37 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
38 sizeof(struct statistics_rx_non_phy) * 40 +
39 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
40 ssize_t ret;
41 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
42 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
43 struct statistics_rx_non_phy *general, *accum_general;
44 struct statistics_rx_non_phy *delta_general, *max_general;
45 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
46
47 if (!iwl_is_alive(priv))
48 return -EAGAIN;
49
50 buf = kzalloc(bufsz, GFP_KERNEL);
51 if (!buf) {
52 IWL_ERR(priv, "Can not allocate Buffer\n");
53 return -ENOMEM;
54 }
55
56 /*
57 * the statistic information display here is based on
58 * the last statistics notification from uCode
59 * might not reflect the current uCode activity
60 */
61 ofdm = &priv->statistics.rx.ofdm;
62 cck = &priv->statistics.rx.cck;
63 general = &priv->statistics.rx.general;
64 ht = &priv->statistics.rx.ofdm_ht;
65 accum_ofdm = &priv->accum_statistics.rx.ofdm;
66 accum_cck = &priv->accum_statistics.rx.cck;
67 accum_general = &priv->accum_statistics.rx.general;
68 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
69 delta_ofdm = &priv->delta_statistics.rx.ofdm;
70 delta_cck = &priv->delta_statistics.rx.cck;
71 delta_general = &priv->delta_statistics.rx.general;
72 delta_ht = &priv->delta_statistics.rx.ofdm_ht;
73 max_ofdm = &priv->max_delta.rx.ofdm;
74 max_cck = &priv->max_delta.rx.cck;
75 max_general = &priv->max_delta.rx.general;
76 max_ht = &priv->max_delta.rx.ofdm_ht;
77
78 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
79 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
80 "acumulative delta max\n",
81 "Statistics_Rx - OFDM:");
82 pos += scnprintf(buf + pos, bufsz - pos,
83 " %-30s %10u %10u %10u %10u\n",
84 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
85 accum_ofdm->ina_cnt,
86 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
87 pos += scnprintf(buf + pos, bufsz - pos,
88 " %-30s %10u %10u %10u %10u\n",
89 "fina_cnt:",
90 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
91 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
92 pos += scnprintf(buf + pos, bufsz - pos,
93 " %-30s %10u %10u %10u %10u\n",
94 "plcp_err:",
95 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
96 delta_ofdm->plcp_err, max_ofdm->plcp_err);
97 pos += scnprintf(buf + pos, bufsz - pos,
98 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
99 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
100 delta_ofdm->crc32_err, max_ofdm->crc32_err);
101 pos += scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
103 le32_to_cpu(ofdm->overrun_err),
104 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
105 max_ofdm->overrun_err);
106 pos += scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n",
108 "early_overrun_err:",
109 le32_to_cpu(ofdm->early_overrun_err),
110 accum_ofdm->early_overrun_err,
111 delta_ofdm->early_overrun_err,
112 max_ofdm->early_overrun_err);
113 pos += scnprintf(buf + pos, bufsz - pos,
114 " %-30s %10u %10u %10u %10u\n",
115 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
116 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
117 max_ofdm->crc32_good);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
120 le32_to_cpu(ofdm->false_alarm_cnt),
121 accum_ofdm->false_alarm_cnt,
122 delta_ofdm->false_alarm_cnt,
123 max_ofdm->false_alarm_cnt);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 " %-30s %10u %10u %10u %10u\n",
126 "fina_sync_err_cnt:",
127 le32_to_cpu(ofdm->fina_sync_err_cnt),
128 accum_ofdm->fina_sync_err_cnt,
129 delta_ofdm->fina_sync_err_cnt,
130 max_ofdm->fina_sync_err_cnt);
131 pos += scnprintf(buf + pos, bufsz - pos,
132 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
133 le32_to_cpu(ofdm->sfd_timeout),
134 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
135 max_ofdm->sfd_timeout);
136 pos += scnprintf(buf + pos, bufsz - pos,
137 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
138 le32_to_cpu(ofdm->fina_timeout),
139 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
140 max_ofdm->fina_timeout);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "unresponded_rts:",
144 le32_to_cpu(ofdm->unresponded_rts),
145 accum_ofdm->unresponded_rts,
146 delta_ofdm->unresponded_rts,
147 max_ofdm->unresponded_rts);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "rxe_frame_lmt_ovrun:",
151 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
152 accum_ofdm->rxe_frame_limit_overrun,
153 delta_ofdm->rxe_frame_limit_overrun,
154 max_ofdm->rxe_frame_limit_overrun);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
157 le32_to_cpu(ofdm->sent_ack_cnt),
158 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
159 max_ofdm->sent_ack_cnt);
160 pos += scnprintf(buf + pos, bufsz - pos,
161 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
162 le32_to_cpu(ofdm->sent_cts_cnt),
163 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
164 max_ofdm->sent_cts_cnt);
165 pos += scnprintf(buf + pos, bufsz - pos,
166 " %-30s %10u %10u %10u %10u\n",
167 "sent_ba_rsp_cnt:",
168 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
169 accum_ofdm->sent_ba_rsp_cnt,
170 delta_ofdm->sent_ba_rsp_cnt,
171 max_ofdm->sent_ba_rsp_cnt);
172 pos += scnprintf(buf + pos, bufsz - pos,
173 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
174 le32_to_cpu(ofdm->dsp_self_kill),
175 accum_ofdm->dsp_self_kill,
176 delta_ofdm->dsp_self_kill,
177 max_ofdm->dsp_self_kill);
178 pos += scnprintf(buf + pos, bufsz - pos,
179 " %-30s %10u %10u %10u %10u\n",
180 "mh_format_err:",
181 le32_to_cpu(ofdm->mh_format_err),
182 accum_ofdm->mh_format_err,
183 delta_ofdm->mh_format_err,
184 max_ofdm->mh_format_err);
185 pos += scnprintf(buf + pos, bufsz - pos,
186 " %-30s %10u %10u %10u %10u\n",
187 "re_acq_main_rssi_sum:",
188 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
189 accum_ofdm->re_acq_main_rssi_sum,
190 delta_ofdm->re_acq_main_rssi_sum,
191 max_ofdm->re_acq_main_rssi_sum);
192
193 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
194 "acumulative delta max\n",
195 "Statistics_Rx - CCK:");
196 pos += scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n",
198 "ina_cnt:",
199 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
200 delta_cck->ina_cnt, max_cck->ina_cnt);
201 pos += scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n",
203 "fina_cnt:",
204 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
205 delta_cck->fina_cnt, max_cck->fina_cnt);
206 pos += scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n",
208 "plcp_err:",
209 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
210 delta_cck->plcp_err, max_cck->plcp_err);
211 pos += scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n",
213 "crc32_err:",
214 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
215 delta_cck->crc32_err, max_cck->crc32_err);
216 pos += scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n",
218 "overrun_err:",
219 le32_to_cpu(cck->overrun_err),
220 accum_cck->overrun_err, delta_cck->overrun_err,
221 max_cck->overrun_err);
222 pos += scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n",
224 "early_overrun_err:",
225 le32_to_cpu(cck->early_overrun_err),
226 accum_cck->early_overrun_err,
227 delta_cck->early_overrun_err,
228 max_cck->early_overrun_err);
229 pos += scnprintf(buf + pos, bufsz - pos,
230 " %-30s %10u %10u %10u %10u\n",
231 "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos += scnprintf(buf + pos, bufsz - pos,
235 " %-30s %10u %10u %10u %10u\n",
236 "false_alarm_cnt:",
237 le32_to_cpu(cck->false_alarm_cnt),
238 accum_cck->false_alarm_cnt,
239 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
240 pos += scnprintf(buf + pos, bufsz - pos,
241 " %-30s %10u %10u %10u %10u\n",
242 "fina_sync_err_cnt:",
243 le32_to_cpu(cck->fina_sync_err_cnt),
244 accum_cck->fina_sync_err_cnt,
245 delta_cck->fina_sync_err_cnt,
246 max_cck->fina_sync_err_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 " %-30s %10u %10u %10u %10u\n",
249 "sfd_timeout:",
250 le32_to_cpu(cck->sfd_timeout),
251 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
252 max_cck->sfd_timeout);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
255 le32_to_cpu(cck->fina_timeout),
256 accum_cck->fina_timeout, delta_cck->fina_timeout,
257 max_cck->fina_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 " %-30s %10u %10u %10u %10u\n",
260 "unresponded_rts:",
261 le32_to_cpu(cck->unresponded_rts),
262 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
273 le32_to_cpu(cck->sent_ack_cnt),
274 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
275 max_cck->sent_ack_cnt);
276 pos += scnprintf(buf + pos, bufsz - pos,
277 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
278 le32_to_cpu(cck->sent_cts_cnt),
279 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
280 max_cck->sent_cts_cnt);
281 pos += scnprintf(buf + pos, bufsz - pos,
282 " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:",
283 le32_to_cpu(cck->sent_ba_rsp_cnt),
284 accum_cck->sent_ba_rsp_cnt,
285 delta_cck->sent_ba_rsp_cnt,
286 max_cck->sent_ba_rsp_cnt);
287 pos += scnprintf(buf + pos, bufsz - pos,
288 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
289 le32_to_cpu(cck->dsp_self_kill),
290 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
291 max_cck->dsp_self_kill);
292 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n", "mh_format_err:",
294 le32_to_cpu(cck->mh_format_err),
295 accum_cck->mh_format_err, delta_cck->mh_format_err,
296 max_cck->mh_format_err);
297 pos += scnprintf(buf + pos, bufsz - pos,
298 " %-30s %10u %10u %10u %10u\n",
299 "re_acq_main_rssi_sum:",
300 le32_to_cpu(cck->re_acq_main_rssi_sum),
301 accum_cck->re_acq_main_rssi_sum,
302 delta_cck->re_acq_main_rssi_sum,
303 max_cck->re_acq_main_rssi_sum);
304
305 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
306 "acumulative delta max\n",
307 "Statistics_Rx - GENERAL:");
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
310 le32_to_cpu(general->bogus_cts),
311 accum_general->bogus_cts, delta_general->bogus_cts,
312 max_general->bogus_cts);
313 pos += scnprintf(buf + pos, bufsz - pos,
314 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
315 le32_to_cpu(general->bogus_ack),
316 accum_general->bogus_ack, delta_general->bogus_ack,
317 max_general->bogus_ack);
318 pos += scnprintf(buf + pos, bufsz - pos,
319 " %-30s %10u %10u %10u %10u\n",
320 "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 " %-30s %10u %10u %10u %10u\n",
327 "filtered_frames:",
328 le32_to_cpu(general->filtered_frames),
329 accum_general->filtered_frames,
330 delta_general->filtered_frames,
331 max_general->filtered_frames);
332 pos += scnprintf(buf + pos, bufsz - pos,
333 " %-30s %10u %10u %10u %10u\n",
334 "non_channel_beacons:",
335 le32_to_cpu(general->non_channel_beacons),
336 accum_general->non_channel_beacons,
337 delta_general->non_channel_beacons,
338 max_general->non_channel_beacons);
339 pos += scnprintf(buf + pos, bufsz - pos,
340 " %-30s %10u %10u %10u %10u\n",
341 "channel_beacons:",
342 le32_to_cpu(general->channel_beacons),
343 accum_general->channel_beacons,
344 delta_general->channel_beacons,
345 max_general->channel_beacons);
346 pos += scnprintf(buf + pos, bufsz - pos,
347 " %-30s %10u %10u %10u %10u\n",
348 "num_missed_bcon:",
349 le32_to_cpu(general->num_missed_bcon),
350 accum_general->num_missed_bcon,
351 delta_general->num_missed_bcon,
352 max_general->num_missed_bcon);
353 pos += scnprintf(buf + pos, bufsz - pos,
354 " %-30s %10u %10u %10u %10u\n",
355 "adc_rx_saturation_time:",
356 le32_to_cpu(general->adc_rx_saturation_time),
357 accum_general->adc_rx_saturation_time,
358 delta_general->adc_rx_saturation_time,
359 max_general->adc_rx_saturation_time);
360 pos += scnprintf(buf + pos, bufsz - pos,
361 " %-30s %10u %10u %10u %10u\n",
362 "ina_detect_search_tm:",
363 le32_to_cpu(general->ina_detection_search_time),
364 accum_general->ina_detection_search_time,
365 delta_general->ina_detection_search_time,
366 max_general->ina_detection_search_time);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "beacon_silence_rssi_a:",
370 le32_to_cpu(general->beacon_silence_rssi_a),
371 accum_general->beacon_silence_rssi_a,
372 delta_general->beacon_silence_rssi_a,
373 max_general->beacon_silence_rssi_a);
374 pos += scnprintf(buf + pos, bufsz - pos,
375 " %-30s %10u %10u %10u %10u\n",
376 "beacon_silence_rssi_b:",
377 le32_to_cpu(general->beacon_silence_rssi_b),
378 accum_general->beacon_silence_rssi_b,
379 delta_general->beacon_silence_rssi_b,
380 max_general->beacon_silence_rssi_b);
381 pos += scnprintf(buf + pos, bufsz - pos,
382 " %-30s %10u %10u %10u %10u\n",
383 "beacon_silence_rssi_c:",
384 le32_to_cpu(general->beacon_silence_rssi_c),
385 accum_general->beacon_silence_rssi_c,
386 delta_general->beacon_silence_rssi_c,
387 max_general->beacon_silence_rssi_c);
388 pos += scnprintf(buf + pos, bufsz - pos,
389 " %-30s %10u %10u %10u %10u\n",
390 "interference_data_flag:",
391 le32_to_cpu(general->interference_data_flag),
392 accum_general->interference_data_flag,
393 delta_general->interference_data_flag,
394 max_general->interference_data_flag);
395 pos += scnprintf(buf + pos, bufsz - pos,
396 " %-30s %10u %10u %10u %10u\n",
397 "channel_load:",
398 le32_to_cpu(general->channel_load),
399 accum_general->channel_load,
400 delta_general->channel_load,
401 max_general->channel_load);
402 pos += scnprintf(buf + pos, bufsz - pos,
403 " %-30s %10u %10u %10u %10u\n",
404 "dsp_false_alarms:",
405 le32_to_cpu(general->dsp_false_alarms),
406 accum_general->dsp_false_alarms,
407 delta_general->dsp_false_alarms,
408 max_general->dsp_false_alarms);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 " %-30s %10u %10u %10u %10u\n",
411 "beacon_rssi_a:",
412 le32_to_cpu(general->beacon_rssi_a),
413 accum_general->beacon_rssi_a,
414 delta_general->beacon_rssi_a,
415 max_general->beacon_rssi_a);
416 pos += scnprintf(buf + pos, bufsz - pos,
417 " %-30s %10u %10u %10u %10u\n",
418 "beacon_rssi_b:",
419 le32_to_cpu(general->beacon_rssi_b),
420 accum_general->beacon_rssi_b,
421 delta_general->beacon_rssi_b,
422 max_general->beacon_rssi_b);
423 pos += scnprintf(buf + pos, bufsz - pos,
424 " %-30s %10u %10u %10u %10u\n",
425 "beacon_rssi_c:",
426 le32_to_cpu(general->beacon_rssi_c),
427 accum_general->beacon_rssi_c,
428 delta_general->beacon_rssi_c,
429 max_general->beacon_rssi_c);
430 pos += scnprintf(buf + pos, bufsz - pos,
431 " %-30s %10u %10u %10u %10u\n",
432 "beacon_energy_a:",
433 le32_to_cpu(general->beacon_energy_a),
434 accum_general->beacon_energy_a,
435 delta_general->beacon_energy_a,
436 max_general->beacon_energy_a);
437 pos += scnprintf(buf + pos, bufsz - pos,
438 " %-30s %10u %10u %10u %10u\n",
439 "beacon_energy_b:",
440 le32_to_cpu(general->beacon_energy_b),
441 accum_general->beacon_energy_b,
442 delta_general->beacon_energy_b,
443 max_general->beacon_energy_b);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 " %-30s %10u %10u %10u %10u\n",
446 "beacon_energy_c:",
447 le32_to_cpu(general->beacon_energy_c),
448 accum_general->beacon_energy_c,
449 delta_general->beacon_energy_c,
450 max_general->beacon_energy_c);
451
452 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
453 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
454 "acumulative delta max\n",
455 "Statistics_Rx - OFDM_HT:");
456 pos += scnprintf(buf + pos, bufsz - pos,
457 " %-30s %10u %10u %10u %10u\n",
458 "plcp_err:",
459 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
460 delta_ht->plcp_err, max_ht->plcp_err);
461 pos += scnprintf(buf + pos, bufsz - pos,
462 " %-30s %10u %10u %10u %10u\n",
463 "overrun_err:",
464 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
465 delta_ht->overrun_err, max_ht->overrun_err);
466 pos += scnprintf(buf + pos, bufsz - pos,
467 " %-30s %10u %10u %10u %10u\n",
468 "early_overrun_err:",
469 le32_to_cpu(ht->early_overrun_err),
470 accum_ht->early_overrun_err,
471 delta_ht->early_overrun_err,
472 max_ht->early_overrun_err);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 " %-30s %10u %10u %10u %10u\n",
475 "crc32_good:",
476 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
477 delta_ht->crc32_good, max_ht->crc32_good);
478 pos += scnprintf(buf + pos, bufsz - pos,
479 " %-30s %10u %10u %10u %10u\n",
480 "crc32_err:",
481 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
482 delta_ht->crc32_err, max_ht->crc32_err);
483 pos += scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n",
485 "mh_format_err:",
486 le32_to_cpu(ht->mh_format_err),
487 accum_ht->mh_format_err,
488 delta_ht->mh_format_err, max_ht->mh_format_err);
489 pos += scnprintf(buf + pos, bufsz - pos,
490 " %-30s %10u %10u %10u %10u\n",
491 "agg_crc32_good:",
492 le32_to_cpu(ht->agg_crc32_good),
493 accum_ht->agg_crc32_good,
494 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
495 pos += scnprintf(buf + pos, bufsz - pos,
496 " %-30s %10u %10u %10u %10u\n",
497 "agg_mpdu_cnt:",
498 le32_to_cpu(ht->agg_mpdu_cnt),
499 accum_ht->agg_mpdu_cnt,
500 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
501 pos += scnprintf(buf + pos, bufsz - pos,
502 " %-30s %10u %10u %10u %10u\n",
503 "agg_cnt:",
504 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
505 delta_ht->agg_cnt, max_ht->agg_cnt);
506 pos += scnprintf(buf + pos, bufsz - pos,
507 " %-30s %10u %10u %10u %10u\n",
508 "unsupport_mcs:",
509 le32_to_cpu(ht->unsupport_mcs),
510 accum_ht->unsupport_mcs,
511 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
512
513 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
514 kfree(buf);
515 return ret;
516}
517
518ssize_t iwl_ucode_tx_stats_read(struct file *file,
519 char __user *user_buf,
520 size_t count, loff_t *ppos)
521{
522 struct iwl_priv *priv = file->private_data;
523 int pos = 0;
524 char *buf;
525 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
526 ssize_t ret;
527 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
528
529 if (!iwl_is_alive(priv))
530 return -EAGAIN;
531
532 buf = kzalloc(bufsz, GFP_KERNEL);
533 if (!buf) {
534 IWL_ERR(priv, "Can not allocate Buffer\n");
535 return -ENOMEM;
536 }
537
538 /* the statistic information display here is based on
539 * the last statistics notification from uCode
540 * might not reflect the current uCode activity
541 */
542 tx = &priv->statistics.tx;
543 accum_tx = &priv->accum_statistics.tx;
544 delta_tx = &priv->delta_statistics.tx;
545 max_tx = &priv->max_delta.tx;
546 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
547 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
548 "acumulative delta max\n",
549 "Statistics_Tx:");
550 pos += scnprintf(buf + pos, bufsz - pos,
551 " %-30s %10u %10u %10u %10u\n",
552 "preamble:",
553 le32_to_cpu(tx->preamble_cnt),
554 accum_tx->preamble_cnt,
555 delta_tx->preamble_cnt, max_tx->preamble_cnt);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 " %-30s %10u %10u %10u %10u\n",
558 "rx_detected_cnt:",
559 le32_to_cpu(tx->rx_detected_cnt),
560 accum_tx->rx_detected_cnt,
561 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 " %-30s %10u %10u %10u %10u\n",
564 "bt_prio_defer_cnt:",
565 le32_to_cpu(tx->bt_prio_defer_cnt),
566 accum_tx->bt_prio_defer_cnt,
567 delta_tx->bt_prio_defer_cnt,
568 max_tx->bt_prio_defer_cnt);
569 pos += scnprintf(buf + pos, bufsz - pos,
570 " %-30s %10u %10u %10u %10u\n",
571 "bt_prio_kill_cnt:",
572 le32_to_cpu(tx->bt_prio_kill_cnt),
573 accum_tx->bt_prio_kill_cnt,
574 delta_tx->bt_prio_kill_cnt,
575 max_tx->bt_prio_kill_cnt);
576 pos += scnprintf(buf + pos, bufsz - pos,
577 " %-30s %10u %10u %10u %10u\n",
578 "few_bytes_cnt:",
579 le32_to_cpu(tx->few_bytes_cnt),
580 accum_tx->few_bytes_cnt,
581 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
582 pos += scnprintf(buf + pos, bufsz - pos,
583 " %-30s %10u %10u %10u %10u\n",
584 "cts_timeout:",
585 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
586 delta_tx->cts_timeout, max_tx->cts_timeout);
587 pos += scnprintf(buf + pos, bufsz - pos,
588 " %-30s %10u %10u %10u %10u\n",
589 "ack_timeout:",
590 le32_to_cpu(tx->ack_timeout),
591 accum_tx->ack_timeout,
592 delta_tx->ack_timeout, max_tx->ack_timeout);
593 pos += scnprintf(buf + pos, bufsz - pos,
594 " %-30s %10u %10u %10u %10u\n",
595 "expected_ack_cnt:",
596 le32_to_cpu(tx->expected_ack_cnt),
597 accum_tx->expected_ack_cnt,
598 delta_tx->expected_ack_cnt,
599 max_tx->expected_ack_cnt);
600 pos += scnprintf(buf + pos, bufsz - pos,
601 " %-30s %10u %10u %10u %10u\n",
602 "actual_ack_cnt:",
603 le32_to_cpu(tx->actual_ack_cnt),
604 accum_tx->actual_ack_cnt,
605 delta_tx->actual_ack_cnt,
606 max_tx->actual_ack_cnt);
607 pos += scnprintf(buf + pos, bufsz - pos,
608 " %-30s %10u %10u %10u %10u\n",
609 "dump_msdu_cnt:",
610 le32_to_cpu(tx->dump_msdu_cnt),
611 accum_tx->dump_msdu_cnt,
612 delta_tx->dump_msdu_cnt,
613 max_tx->dump_msdu_cnt);
614 pos += scnprintf(buf + pos, bufsz - pos,
615 " %-30s %10u %10u %10u %10u\n",
616 "abort_nxt_frame_mismatch:",
617 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
618 accum_tx->burst_abort_next_frame_mismatch_cnt,
619 delta_tx->burst_abort_next_frame_mismatch_cnt,
620 max_tx->burst_abort_next_frame_mismatch_cnt);
621 pos += scnprintf(buf + pos, bufsz - pos,
622 " %-30s %10u %10u %10u %10u\n",
623 "abort_missing_nxt_frame:",
624 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
625 accum_tx->burst_abort_missing_next_frame_cnt,
626 delta_tx->burst_abort_missing_next_frame_cnt,
627 max_tx->burst_abort_missing_next_frame_cnt);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 " %-30s %10u %10u %10u %10u\n",
630 "cts_timeout_collision:",
631 le32_to_cpu(tx->cts_timeout_collision),
632 accum_tx->cts_timeout_collision,
633 delta_tx->cts_timeout_collision,
634 max_tx->cts_timeout_collision);
635 pos += scnprintf(buf + pos, bufsz - pos,
636 " %-30s %10u %10u %10u %10u\n",
637 "ack_ba_timeout_collision:",
638 le32_to_cpu(tx->ack_or_ba_timeout_collision),
639 accum_tx->ack_or_ba_timeout_collision,
640 delta_tx->ack_or_ba_timeout_collision,
641 max_tx->ack_or_ba_timeout_collision);
642 pos += scnprintf(buf + pos, bufsz - pos,
643 " %-30s %10u %10u %10u %10u\n",
644 "agg ba_timeout:",
645 le32_to_cpu(tx->agg.ba_timeout),
646 accum_tx->agg.ba_timeout,
647 delta_tx->agg.ba_timeout,
648 max_tx->agg.ba_timeout);
649 pos += scnprintf(buf + pos, bufsz - pos,
650 " %-30s %10u %10u %10u %10u\n",
651 "agg ba_resched_frames:",
652 le32_to_cpu(tx->agg.ba_reschedule_frames),
653 accum_tx->agg.ba_reschedule_frames,
654 delta_tx->agg.ba_reschedule_frames,
655 max_tx->agg.ba_reschedule_frames);
656 pos += scnprintf(buf + pos, bufsz - pos,
657 " %-30s %10u %10u %10u %10u\n",
658 "agg scd_query_agg_frame:",
659 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
660 accum_tx->agg.scd_query_agg_frame_cnt,
661 delta_tx->agg.scd_query_agg_frame_cnt,
662 max_tx->agg.scd_query_agg_frame_cnt);
663 pos += scnprintf(buf + pos, bufsz - pos,
664 " %-30s %10u %10u %10u %10u\n",
665 "agg scd_query_no_agg:",
666 le32_to_cpu(tx->agg.scd_query_no_agg),
667 accum_tx->agg.scd_query_no_agg,
668 delta_tx->agg.scd_query_no_agg,
669 max_tx->agg.scd_query_no_agg);
670 pos += scnprintf(buf + pos, bufsz - pos,
671 " %-30s %10u %10u %10u %10u\n",
672 "agg scd_query_agg:",
673 le32_to_cpu(tx->agg.scd_query_agg),
674 accum_tx->agg.scd_query_agg,
675 delta_tx->agg.scd_query_agg,
676 max_tx->agg.scd_query_agg);
677 pos += scnprintf(buf + pos, bufsz - pos,
678 " %-30s %10u %10u %10u %10u\n",
679 "agg scd_query_mismatch:",
680 le32_to_cpu(tx->agg.scd_query_mismatch),
681 accum_tx->agg.scd_query_mismatch,
682 delta_tx->agg.scd_query_mismatch,
683 max_tx->agg.scd_query_mismatch);
684 pos += scnprintf(buf + pos, bufsz - pos,
685 " %-30s %10u %10u %10u %10u\n",
686 "agg frame_not_ready:",
687 le32_to_cpu(tx->agg.frame_not_ready),
688 accum_tx->agg.frame_not_ready,
689 delta_tx->agg.frame_not_ready,
690 max_tx->agg.frame_not_ready);
691 pos += scnprintf(buf + pos, bufsz - pos,
692 " %-30s %10u %10u %10u %10u\n",
693 "agg underrun:",
694 le32_to_cpu(tx->agg.underrun),
695 accum_tx->agg.underrun,
696 delta_tx->agg.underrun, max_tx->agg.underrun);
697 pos += scnprintf(buf + pos, bufsz - pos,
698 " %-30s %10u %10u %10u %10u\n",
699 "agg bt_prio_kill:",
700 le32_to_cpu(tx->agg.bt_prio_kill),
701 accum_tx->agg.bt_prio_kill,
702 delta_tx->agg.bt_prio_kill,
703 max_tx->agg.bt_prio_kill);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 " %-30s %10u %10u %10u %10u\n",
706 "agg rx_ba_rsp_cnt:",
707 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
708 accum_tx->agg.rx_ba_rsp_cnt,
709 delta_tx->agg.rx_ba_rsp_cnt,
710 max_tx->agg.rx_ba_rsp_cnt);
711
712 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
713 kfree(buf);
714 return ret;
715}
716
717ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
718 size_t count, loff_t *ppos)
719{
720 struct iwl_priv *priv = file->private_data;
721 int pos = 0;
722 char *buf;
723 int bufsz = sizeof(struct statistics_general) * 10 + 300;
724 ssize_t ret;
725 struct statistics_general *general, *accum_general;
726 struct statistics_general *delta_general, *max_general;
727 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
728 struct statistics_div *div, *accum_div, *delta_div, *max_div;
729
730 if (!iwl_is_alive(priv))
731 return -EAGAIN;
732
733 buf = kzalloc(bufsz, GFP_KERNEL);
734 if (!buf) {
735 IWL_ERR(priv, "Can not allocate Buffer\n");
736 return -ENOMEM;
737 }
738
739 /* the statistic information display here is based on
740 * the last statistics notification from uCode
741 * might not reflect the current uCode activity
742 */
743 general = &priv->statistics.general;
744 dbg = &priv->statistics.general.dbg;
745 div = &priv->statistics.general.div;
746 accum_general = &priv->accum_statistics.general;
747 delta_general = &priv->delta_statistics.general;
748 max_general = &priv->max_delta.general;
749 accum_dbg = &priv->accum_statistics.general.dbg;
750 delta_dbg = &priv->delta_statistics.general.dbg;
751 max_dbg = &priv->max_delta.general.dbg;
752 accum_div = &priv->accum_statistics.general.div;
753 delta_div = &priv->delta_statistics.general.div;
754 max_div = &priv->max_delta.general.div;
755 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
756 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
757 "acumulative delta max\n",
758 "Statistics_General:");
759 pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
760 "temperature:",
761 le32_to_cpu(general->temperature));
762 pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
763 "temperature_m:",
764 le32_to_cpu(general->temperature_m));
765 pos += scnprintf(buf + pos, bufsz - pos,
766 " %-30s %10u %10u %10u %10u\n",
767 "burst_check:",
768 le32_to_cpu(dbg->burst_check),
769 accum_dbg->burst_check,
770 delta_dbg->burst_check, max_dbg->burst_check);
771 pos += scnprintf(buf + pos, bufsz - pos,
772 " %-30s %10u %10u %10u %10u\n",
773 "burst_count:",
774 le32_to_cpu(dbg->burst_count),
775 accum_dbg->burst_count,
776 delta_dbg->burst_count, max_dbg->burst_count);
777 pos += scnprintf(buf + pos, bufsz - pos,
778 " %-30s %10u %10u %10u %10u\n",
779 "sleep_time:",
780 le32_to_cpu(general->sleep_time),
781 accum_general->sleep_time,
782 delta_general->sleep_time, max_general->sleep_time);
783 pos += scnprintf(buf + pos, bufsz - pos,
784 " %-30s %10u %10u %10u %10u\n",
785 "slots_out:",
786 le32_to_cpu(general->slots_out),
787 accum_general->slots_out,
788 delta_general->slots_out, max_general->slots_out);
789 pos += scnprintf(buf + pos, bufsz - pos,
790 " %-30s %10u %10u %10u %10u\n",
791 "slots_idle:",
792 le32_to_cpu(general->slots_idle),
793 accum_general->slots_idle,
794 delta_general->slots_idle, max_general->slots_idle);
795 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
796 le32_to_cpu(general->ttl_timestamp));
797 pos += scnprintf(buf + pos, bufsz - pos,
798 " %-30s %10u %10u %10u %10u\n",
799 "tx_on_a:",
800 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
801 delta_div->tx_on_a, max_div->tx_on_a);
802 pos += scnprintf(buf + pos, bufsz - pos,
803 " %-30s %10u %10u %10u %10u\n",
804 "tx_on_b:",
805 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
806 delta_div->tx_on_b, max_div->tx_on_b);
807 pos += scnprintf(buf + pos, bufsz - pos,
808 " %-30s %10u %10u %10u %10u\n",
809 "exec_time:",
810 le32_to_cpu(div->exec_time), accum_div->exec_time,
811 delta_div->exec_time, max_div->exec_time);
812 pos += scnprintf(buf + pos, bufsz - pos,
813 " %-30s %10u %10u %10u %10u\n",
814 "probe_time:",
815 le32_to_cpu(div->probe_time), accum_div->probe_time,
816 delta_div->probe_time, max_div->probe_time);
817 pos += scnprintf(buf + pos, bufsz - pos,
818 " %-30s %10u %10u %10u %10u\n",
819 "rx_enable_counter:",
820 le32_to_cpu(general->rx_enable_counter),
821 accum_general->rx_enable_counter,
822 delta_general->rx_enable_counter,
823 max_general->rx_enable_counter);
824 pos += scnprintf(buf + pos, bufsz - pos,
825 " %-30s %10u %10u %10u %10u\n",
826 "num_of_sos_states:",
827 le32_to_cpu(general->num_of_sos_states),
828 accum_general->num_of_sos_states,
829 delta_general->num_of_sos_states,
830 max_general->num_of_sos_states);
831 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
832 kfree(buf);
833 return ret;
834}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
new file mode 100644
index 000000000000..59b1f25f0d85
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_DEBUGFS
34ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
39 size_t count, loff_t *ppos);
40#else
41static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
42 size_t count, loff_t *ppos)
43{
44 return 0;
45}
46static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
47 size_t count, loff_t *ppos)
48{
49 return 0;
50}
51static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
52 size_t count, loff_t *ppos)
53{
54 return 0;
55}
56#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 28bc8f8ba981..44ef5d93befc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -262,6 +262,7 @@ struct iwl_hcmd_ops iwlagn_hcmd = {
262 .commit_rxon = iwl_commit_rxon, 262 .commit_rxon = iwl_commit_rxon,
263 .set_rxon_chain = iwl_set_rxon_chain, 263 .set_rxon_chain = iwl_set_rxon_chain,
264 .set_tx_ant = iwlagn_send_tx_ant_config, 264 .set_tx_ant = iwlagn_send_tx_ant_config,
265 .send_bt_config = iwl_send_bt_config,
265}; 266};
266 267
267struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { 268struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
@@ -271,4 +272,5 @@ struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
271 .chain_noise_reset = iwlagn_chain_noise_reset, 272 .chain_noise_reset = iwlagn_chain_noise_reset,
272 .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag, 273 .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
273 .calc_rssi = iwlagn_calc_rssi, 274 .calc_rssi = iwlagn_calc_rssi,
275 .request_scan = iwlagn_request_scan,
274}; 276};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index c465c8590833..a27347425968 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -331,7 +331,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
331 } *hdr; 331 } *hdr;
332 332
333 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 333 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
334 EEPROM_5000_CALIB_ALL); 334 EEPROM_CALIB_ALL);
335 return hdr->version; 335 return hdr->version;
336 336
337} 337}
@@ -348,22 +348,22 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
348 348
349 switch (address & INDIRECT_TYPE_MSK) { 349 switch (address & INDIRECT_TYPE_MSK) {
350 case INDIRECT_HOST: 350 case INDIRECT_HOST:
351 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST); 351 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
352 break; 352 break;
353 case INDIRECT_GENERAL: 353 case INDIRECT_GENERAL:
354 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL); 354 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
355 break; 355 break;
356 case INDIRECT_REGULATORY: 356 case INDIRECT_REGULATORY:
357 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY); 357 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
358 break; 358 break;
359 case INDIRECT_CALIBRATION: 359 case INDIRECT_CALIBRATION:
360 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION); 360 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
361 break; 361 break;
362 case INDIRECT_PROCESS_ADJST: 362 case INDIRECT_PROCESS_ADJST:
363 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST); 363 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
364 break; 364 break;
365 case INDIRECT_OTHERS: 365 case INDIRECT_OTHERS:
366 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS); 366 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
367 break; 367 break;
368 default: 368 default:
369 IWL_ERR(priv, "illegal indirect type: 0x%X\n", 369 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
@@ -1111,3 +1111,405 @@ void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1111 memcpy(&priv->_agn.last_phy_res, pkt->u.raw, 1111 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1112 sizeof(struct iwl_rx_phy_res)); 1112 sizeof(struct iwl_rx_phy_res));
1113} 1113}
1114
1115static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1116 enum ieee80211_band band,
1117 struct iwl_scan_channel *scan_ch)
1118{
1119 const struct ieee80211_supported_band *sband;
1120 const struct iwl_channel_info *ch_info;
1121 u16 passive_dwell = 0;
1122 u16 active_dwell = 0;
1123 int i, added = 0;
1124 u16 channel = 0;
1125
1126 sband = iwl_get_hw_mode(priv, band);
1127 if (!sband) {
1128 IWL_ERR(priv, "invalid band\n");
1129 return added;
1130 }
1131
1132 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
1133 passive_dwell = iwl_get_passive_dwell_time(priv, band);
1134
1135 if (passive_dwell <= active_dwell)
1136 passive_dwell = active_dwell + 1;
1137
1138 /* only scan single channel, good enough to reset the RF */
1139 /* pick the first valid not in-use channel */
1140 if (band == IEEE80211_BAND_5GHZ) {
1141 for (i = 14; i < priv->channel_count; i++) {
1142 if (priv->channel_info[i].channel !=
1143 le16_to_cpu(priv->staging_rxon.channel)) {
1144 channel = priv->channel_info[i].channel;
1145 ch_info = iwl_get_channel_info(priv,
1146 band, channel);
1147 if (is_channel_valid(ch_info))
1148 break;
1149 }
1150 }
1151 } else {
1152 for (i = 0; i < 14; i++) {
1153 if (priv->channel_info[i].channel !=
1154 le16_to_cpu(priv->staging_rxon.channel)) {
1155 channel =
1156 priv->channel_info[i].channel;
1157 ch_info = iwl_get_channel_info(priv,
1158 band, channel);
1159 if (is_channel_valid(ch_info))
1160 break;
1161 }
1162 }
1163 }
1164 if (channel) {
1165 scan_ch->channel = cpu_to_le16(channel);
1166 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1167 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1168 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1169 /* Set txpower levels to defaults */
1170 scan_ch->dsp_atten = 110;
1171 if (band == IEEE80211_BAND_5GHZ)
1172 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1173 else
1174 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1175 added++;
1176 } else
1177 IWL_ERR(priv, "no valid channel found\n");
1178 return added;
1179}
1180
1181static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1182 enum ieee80211_band band,
1183 u8 is_active, u8 n_probes,
1184 struct iwl_scan_channel *scan_ch)
1185{
1186 struct ieee80211_channel *chan;
1187 const struct ieee80211_supported_band *sband;
1188 const struct iwl_channel_info *ch_info;
1189 u16 passive_dwell = 0;
1190 u16 active_dwell = 0;
1191 int added, i;
1192 u16 channel;
1193
1194 sband = iwl_get_hw_mode(priv, band);
1195 if (!sband)
1196 return 0;
1197
1198 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1199 passive_dwell = iwl_get_passive_dwell_time(priv, band);
1200
1201 if (passive_dwell <= active_dwell)
1202 passive_dwell = active_dwell + 1;
1203
1204 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1205 chan = priv->scan_request->channels[i];
1206
1207 if (chan->band != band)
1208 continue;
1209
1210 channel = ieee80211_frequency_to_channel(chan->center_freq);
1211 scan_ch->channel = cpu_to_le16(channel);
1212
1213 ch_info = iwl_get_channel_info(priv, band, channel);
1214 if (!is_channel_valid(ch_info)) {
1215 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1216 channel);
1217 continue;
1218 }
1219
1220 if (!is_active || is_channel_passive(ch_info) ||
1221 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
1222 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1223 else
1224 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1225
1226 if (n_probes)
1227 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
1228
1229 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1230 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1231
1232 /* Set txpower levels to defaults */
1233 scan_ch->dsp_atten = 110;
1234
1235 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1236 * power level:
1237 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1238 */
1239 if (band == IEEE80211_BAND_5GHZ)
1240 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1241 else
1242 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1243
1244 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
1245 channel, le32_to_cpu(scan_ch->type),
1246 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1247 "ACTIVE" : "PASSIVE",
1248 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1249 active_dwell : passive_dwell);
1250
1251 scan_ch++;
1252 added++;
1253 }
1254
1255 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1256 return added;
1257}
1258
/*
 * iwlagn_request_scan - build and send a REPLY_SCAN_CMD to the uCode.
 *
 * Runs from the driver workqueue. Validates driver/device state (bailing
 * out through the "done" label on any failure), lazily allocates the
 * oversized scan command buffer, fills in dwell/suspend timing, direct-SSID
 * probe entries, per-band TX rate and RX chain settings, the probe request
 * frame and the channel list, then sends the command synchronously and arms
 * the scan watchdog. On any error path the scanning status bits are cleared
 * and mac80211 is notified via the scan_completed work item.
 */
1259void iwlagn_request_scan(struct iwl_priv *priv)
1260{
1261	struct iwl_host_cmd cmd = {
1262		.id = REPLY_SCAN_CMD,
1263		.len = sizeof(struct iwl_scan_cmd),
1264		.flags = CMD_SIZE_HUGE,
1265	};
1266	struct iwl_scan_cmd *scan;
	/* NOTE(review): conf is assigned below but never read afterwards;
	 * looks like dead local state -- confirm ieee80211_get_hw_conf()
	 * has no needed side effect before removing. */
1267	struct ieee80211_conf *conf = NULL;
1268	u32 rate_flags = 0;
1269	u16 cmd_len;
1270	u16 rx_chain = 0;
1271	enum ieee80211_band band;
1272	u8 n_probes = 0;
1273	u8 rx_ant = priv->hw_params.valid_rx_ant;
1274	u8 rate;
1275	bool is_active = false;
1276	int chan_mod;
1277	u8 active_chains;
1278
1279	conf = ieee80211_get_hw_conf(priv->hw);
1280
1281	cancel_delayed_work(&priv->scan_check);
1282
	/* Precondition gates: each failed check jumps to "done", which
	 * clears the scanning bits and reports scan completion. */
1283	if (!iwl_is_ready(priv)) {
1284		IWL_WARN(priv, "request scan called when driver not ready.\n");
1285		goto done;
1286	}
1287
1288	/* Make sure the scan wasn't canceled before this queued work
1289	 * was given the chance to run... */
1290	if (!test_bit(STATUS_SCANNING, &priv->status))
1291		goto done;
1292
1293	/* This should never be called or scheduled if there is currently
1294	 * a scan active in the hardware. */
1295	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1296		IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
1297			       "Ignoring second request.\n");
1298		goto done;
1299	}
1300
1301	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1302		IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
1303		goto done;
1304	}
1305
1306	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1307		IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
1308		goto done;
1309	}
1310
1311	if (iwl_is_rfkill(priv)) {
1312		IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
1313		goto done;
1314	}
1315
1316	if (!test_bit(STATUS_READY, &priv->status)) {
1317		IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
1318		goto done;
1319	}
1320
	/* Lazily allocate the scan command plus its huge payload area on
	 * first use; the buffer is kept in priv->scan_cmd and reused for
	 * subsequent scans. */
1321	if (!priv->scan_cmd) {
1322		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
1323					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
1324		if (!priv->scan_cmd) {
1325			IWL_DEBUG_SCAN(priv,
1326				       "fail to allocate memory for scan\n");
1327			goto done;
1328		}
1329	}
1330	scan = priv->scan_cmd;
1331	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
1332
1333	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1334	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1335
	/* When associated, limit time off-channel and encode a suspend
	 * interval so the device keeps servicing the association while
	 * scanning. suspend_time is packed as (beacon periods << 22) |
	 * remainder-in-usec. */
1336	if (iwl_is_associated(priv)) {
1337		u16 interval = 0;
1338		u32 extra;
1339		u32 suspend_time = 100;
1340		u32 scan_suspend_time = 100;
1341		unsigned long flags;
1342
1343		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		/* beacon_int is shared state; snapshot it under priv->lock */
1344		spin_lock_irqsave(&priv->lock, flags);
1345		interval = priv->beacon_int;
1346		spin_unlock_irqrestore(&priv->lock, flags);
1347
1348		scan->suspend_time = 0;
1349		scan->max_out_time = cpu_to_le32(200 * 1024);
1350		if (!interval)
1351			interval = suspend_time;
1352
1353		extra = (suspend_time / interval) << 22;
1354		scan_suspend_time = (extra |
1355		    ((suspend_time % interval) * 1024));
1356		scan->suspend_time = cpu_to_le32(scan_suspend_time);
1357		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1358			       scan_suspend_time, interval);
1359	}
1360
	/* Copy the caller's non-empty SSIDs into direct_scan[] entries;
	 * any direct SSID makes this an active scan (n_probes counts them). */
1361	if (priv->is_internal_short_scan) {
1362		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1363	} else if (priv->scan_request->n_ssids) {
1364		int i, p = 0;
1365		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1366		for (i = 0; i < priv->scan_request->n_ssids; i++) {
1367			/* always does wildcard anyway */
1368			if (!priv->scan_request->ssids[i].ssid_len)
1369				continue;
1370			scan->direct_scan[p].id = WLAN_EID_SSID;
1371			scan->direct_scan[p].len =
1372				priv->scan_request->ssids[i].ssid_len;
1373			memcpy(scan->direct_scan[p].ssid,
1374			       priv->scan_request->ssids[i].ssid,
1375			       priv->scan_request->ssids[i].ssid_len);
1376			n_probes++;
1377			p++;
1378		}
1379		is_active = true;
1380	} else
1381		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1382
1383	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1384	scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
1385	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1386
	/* Per-band TX rate / flags and "good CRC threshold" selection */
1387	switch (priv->scan_band) {
1388	case IEEE80211_BAND_2GHZ:
1389		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
1390		chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
1391				       >> RXON_FLG_CHANNEL_MODE_POS;
		/* CCK (1M) is invalid in pure-40MHz mode; fall back to 6M OFDM */
1392		if (chan_mod == CHANNEL_MODE_PURE_40) {
1393			rate = IWL_RATE_6M_PLCP;
1394		} else {
1395			rate = IWL_RATE_1M_PLCP;
1396			rate_flags = RATE_MCS_CCK_MSK;
1397		}
1398		scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
1399		break;
1400	case IEEE80211_BAND_5GHZ:
1401		rate = IWL_RATE_6M_PLCP;
1402		/*
1403		 * If active scanning is requested but a certain channel is
1404		 * marked passive, we can do active scanning if we detect
1405		 * transmissions.
1406		 *
1407		 * There is an issue with some firmware versions that triggers
1408		 * a sysassert on a "good CRC threshold" of zero (== disabled),
1409		 * on a radar channel even though this means that we should NOT
1410		 * send probes.
1411		 *
1412		 * The "good CRC threshold" is the number of frames that we
1413		 * need to receive during our dwell time on a channel before
1414		 * sending out probes -- setting this to a huge value will
1415		 * mean we never reach it, but at the same time work around
1416		 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1417		 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1418		 */
1419		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1420						IWL_GOOD_CRC_TH_NEVER;
1421		break;
1422	default:
1423		IWL_WARN(priv, "Invalid scan band count\n");
1424		goto done;
1425	}
1426
1427	band = priv->scan_band;
1428
	/* Board config may restrict which antennas are used for scanning */
1429	if (priv->cfg->scan_antennas[band])
1430		rx_ant = priv->cfg->scan_antennas[band];
1431
	/* Rotate the TX antenna between scans to even out antenna usage */
1432	priv->scan_tx_ant[band] =
1433			 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
1434	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
1435	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
1436
1437	/* In power save mode use one chain, otherwise use all chains */
1438	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
1439		/* rx_ant has been set to all valid chains previously */
1440		active_chains = rx_ant &
1441				((u8)(priv->chain_noise_data.active_chains));
1442		if (!active_chains)
1443			active_chains = rx_ant;
1444
1445		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
1446				priv->chain_noise_data.active_chains);
1447
1448		rx_ant = first_antenna(active_chains);
1449	}
1450	/* MIMO is not used here, but value is required */
1451	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1452	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1453	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1454	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1455	scan->rx_chain = cpu_to_le16(rx_chain);
	/* Build the probe request at the start of the variable-length data
	 * area; internal short scans carry no extra IEs. */
1456	if (!priv->is_internal_short_scan) {
1457		cmd_len = iwl_fill_probe_req(priv,
1458					(struct ieee80211_mgmt *)scan->data,
1459					priv->scan_request->ie,
1460					priv->scan_request->ie_len,
1461					IWL_MAX_SCAN_SIZE - sizeof(*scan));
1462	} else {
1463		cmd_len = iwl_fill_probe_req(priv,
1464					(struct ieee80211_mgmt *)scan->data,
1465					NULL, 0,
1466					IWL_MAX_SCAN_SIZE - sizeof(*scan));
1467
1468	}
1469	scan->tx_cmd.len = cpu_to_le16(cmd_len);
1470
1471	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1472			       RXON_FILTER_BCON_AWARE_MSK);
1473
	/* Channel list goes immediately after the probe request frame,
	 * hence the &scan->data[tx_cmd.len] placement. */
1474	if (priv->is_internal_short_scan) {
1475		scan->channel_count =
1476			iwl_get_single_channel_for_scan(priv, band,
1477				(void *)&scan->data[le16_to_cpu(
1478				scan->tx_cmd.len)]);
1479	} else {
1480		scan->channel_count =
1481			iwl_get_channels_for_scan(priv, band,
1482				is_active, n_probes,
1483				(void *)&scan->data[le16_to_cpu(
1484				scan->tx_cmd.len)]);
1485	}
1486	if (scan->channel_count == 0) {
1487		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1488		goto done;
1489	}
1490
	/* Final command length = fixed header + probe req + channel array */
1491	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1492	    scan->channel_count * sizeof(struct iwl_scan_channel);
1493	cmd.data = scan;
1494	scan->len = cpu_to_le16(cmd.len);
1495
	/* Mark hardware busy before the sync send; arm the watchdog that
	 * recovers if the scan-complete notification never arrives. */
1496	set_bit(STATUS_SCAN_HW, &priv->status);
1497	if (iwl_send_cmd_sync(priv, &cmd))
1498		goto done;
1499
1500	queue_delayed_work(priv->workqueue, &priv->scan_check,
1501			   IWL_SCAN_CHECK_WATCHDOG);
1502
1503	return;
1504
1505 done:
1506	/* Cannot perform scan. Make sure we clear scanning
1507	 * bits from status so next scan request can be performed.
1508	 * If we don't clear scanning status bit here all next scan
1509	 * will fail
1510	*/
1511	clear_bit(STATUS_SCAN_HW, &priv->status);
1512	clear_bit(STATUS_SCANNING, &priv->status);
1513	/* inform mac80211 scan aborted */
1514	queue_work(priv->workqueue, &priv->scan_completed);
1515}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index f7d85a2173c8..bfcac5608d87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -295,11 +295,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
295 return tl->total; 295 return tl->total;
296} 296}
297 297
298static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, 298static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
299 struct iwl_lq_sta *lq_data, u8 tid, 299 struct iwl_lq_sta *lq_data, u8 tid,
300 struct ieee80211_sta *sta) 300 struct ieee80211_sta *sta)
301{ 301{
302 int ret; 302 int ret = -EAGAIN;
303 303
304 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 304 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
305 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 305 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -313,29 +313,29 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
313 */ 313 */
314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", 314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
315 tid); 315 tid);
316 ret = ieee80211_stop_tx_ba_session(sta, tid, 316 ieee80211_stop_tx_ba_session(sta, tid,
317 WLAN_BACK_INITIATOR); 317 WLAN_BACK_INITIATOR);
318 } 318 }
319 } 319 } else
320 IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
321 return ret;
320} 322}
321 323
322static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, 324static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
323 struct iwl_lq_sta *lq_data, 325 struct iwl_lq_sta *lq_data,
324 struct ieee80211_sta *sta) 326 struct ieee80211_sta *sta)
325{ 327{
326 if ((tid < TID_MAX_LOAD_COUNT)) 328 if ((tid < TID_MAX_LOAD_COUNT) &&
327 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 329 !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
328 else if (tid == IWL_AGG_ALL_TID) 330 if (priv->cfg->use_rts_for_ht) {
329 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) 331 /*
330 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 332 * switch to RTS/CTS if it is the prefer protection
331 if (priv->cfg->use_rts_for_ht) { 333 * method for HT traffic
332 /* 334 */
333 * switch to RTS/CTS if it is the prefer protection method 335 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
334 * for HT traffic 336 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
335 */ 337 iwlcore_commit_rxon(priv);
336 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); 338 }
337 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
338 iwlcore_commit_rxon(priv);
339 } 339 }
340} 340}
341 341
@@ -868,14 +868,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
868 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, 868 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
869 &rs_index); 869 &rs_index);
870 rs_collect_tx_data(curr_tbl, rs_index, 870 rs_collect_tx_data(curr_tbl, rs_index,
871 info->status.ampdu_ack_len, 871 info->status.ampdu_len,
872 info->status.ampdu_ack_map); 872 info->status.ampdu_ack_len);
873 873
874 /* Update success/fail counts if not searching for new mode */ 874 /* Update success/fail counts if not searching for new mode */
875 if (lq_sta->stay_in_tbl) { 875 if (lq_sta->stay_in_tbl) {
876 lq_sta->total_success += info->status.ampdu_ack_map; 876 lq_sta->total_success += info->status.ampdu_ack_len;
877 lq_sta->total_failed += (info->status.ampdu_ack_len - 877 lq_sta->total_failed += (info->status.ampdu_len -
878 info->status.ampdu_ack_map); 878 info->status.ampdu_ack_len);
879 } 879 }
880 } else { 880 } else {
881 /* 881 /*
@@ -2078,10 +2078,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2078 } 2078 }
2079 /* Else we have enough samples; calculate estimate of 2079 /* Else we have enough samples; calculate estimate of
2080 * actual average throughput */ 2080 * actual average throughput */
2081 2081 if (window->average_tpt != ((window->success_ratio *
2082 /* Sanity-check TPT calculations */ 2082 tbl->expected_tpt[index] + 64) / 128)) {
2083 BUG_ON(window->average_tpt != ((window->success_ratio * 2083 IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2084 tbl->expected_tpt[index] + 64) / 128)); 2084 window->average_tpt = ((window->success_ratio *
2085 tbl->expected_tpt[index] + 64) / 128);
2086 }
2085 2087
2086 /* If we are searching for better modulation mode, check success. */ 2088 /* If we are searching for better modulation mode, check success. */
2087 if (lq_sta->search_better_tbl && 2089 if (lq_sta->search_better_tbl &&
@@ -2558,8 +2560,17 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2558 lq_sta->active_mimo3_rate); 2560 lq_sta->active_mimo3_rate);
2559 2561
2560 /* These values will be overridden later */ 2562 /* These values will be overridden later */
2561 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A; 2563 lq_sta->lq.general_params.single_stream_ant_msk =
2562 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2564 first_antenna(priv->hw_params.valid_tx_ant);
2565 lq_sta->lq.general_params.dual_stream_ant_msk =
2566 priv->hw_params.valid_tx_ant &
2567 ~first_antenna(priv->hw_params.valid_tx_ant);
2568 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2569 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2570 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2571 lq_sta->lq.general_params.dual_stream_ant_msk =
2572 priv->hw_params.valid_tx_ant;
2573 }
2563 2574
2564 /* as default allow aggregation for all tids */ 2575 /* as default allow aggregation for all tids */
2565 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2576 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 3077eac58880..c2a5c85542bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -83,6 +83,15 @@ static inline int get_fifo_from_ac(u8 ac)
83 return ac_to_fifo[ac]; 83 return ac_to_fifo[ac];
84} 84}
85 85
86static inline int get_ac_from_tid(u16 tid)
87{
88 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
89 return tid_to_ac[tid];
90
91 /* no support for TIDs 8-15 yet */
92 return -EINVAL;
93}
94
86static inline int get_fifo_from_tid(u16 tid) 95static inline int get_fifo_from_tid(u16 tid)
87{ 96{
88 if (likely(tid < ARRAY_SIZE(tid_to_ac))) 97 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
@@ -167,7 +176,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
167 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; 176 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
168 177
169 tbl_dw_addr = priv->scd_base_addr + 178 tbl_dw_addr = priv->scd_base_addr +
170 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 179 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
171 180
172 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 181 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
173 182
@@ -186,9 +195,9 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
186 /* Simply stop the queue, but don't change any configuration; 195 /* Simply stop the queue, but don't change any configuration;
187 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 196 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
188 iwl_write_prph(priv, 197 iwl_write_prph(priv,
189 IWL50_SCD_QUEUE_STATUS_BITS(txq_id), 198 IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
190 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)| 199 (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
191 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 200 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
192} 201}
193 202
194void iwlagn_set_wr_ptrs(struct iwl_priv *priv, 203void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -196,7 +205,7 @@ void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
196{ 205{
197 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 206 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
198 (index & 0xff) | (txq_id << 8)); 207 (index & 0xff) | (txq_id << 8));
199 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index); 208 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
200} 209}
201 210
202void iwlagn_tx_queue_set_status(struct iwl_priv *priv, 211void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
@@ -206,11 +215,11 @@ void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
206 int txq_id = txq->q.id; 215 int txq_id = txq->q.id;
207 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; 216 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
208 217
209 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), 218 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
210 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | 219 (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
211 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) | 220 (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
212 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) | 221 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
213 IWL50_SCD_QUEUE_STTS_REG_MSK); 222 IWLAGN_SCD_QUEUE_STTS_REG_MSK);
214 223
215 txq->sched_retry = scd_retry; 224 txq->sched_retry = scd_retry;
216 225
@@ -250,10 +259,10 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
250 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); 259 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
251 260
252 /* Set this queue as a chain-building queue */ 261 /* Set this queue as a chain-building queue */
253 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id)); 262 iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
254 263
255 /* enable aggregations for the queue */ 264 /* enable aggregations for the queue */
256 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id)); 265 iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
257 266
258 /* Place first TFD at index corresponding to start sequence number. 267 /* Place first TFD at index corresponding to start sequence number.
259 * Assumes that ssn_idx is valid (!= 0xFFF) */ 268 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -263,16 +272,16 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
263 272
264 /* Set up Tx window size and frame limit for this queue */ 273 /* Set up Tx window size and frame limit for this queue */
265 iwl_write_targ_mem(priv, priv->scd_base_addr + 274 iwl_write_targ_mem(priv, priv->scd_base_addr +
266 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 275 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
267 sizeof(u32), 276 sizeof(u32),
268 ((SCD_WIN_SIZE << 277 ((SCD_WIN_SIZE <<
269 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 278 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
270 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 279 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
271 ((SCD_FRAME_LIMIT << 280 ((SCD_FRAME_LIMIT <<
272 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 281 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
273 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 282 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
274 283
275 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); 284 iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
276 285
277 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 286 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
278 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 287 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
@@ -298,14 +307,14 @@ int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
298 307
299 iwlagn_tx_queue_stop_scheduler(priv, txq_id); 308 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
300 309
301 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id)); 310 iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
302 311
303 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 312 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
304 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 313 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
305 /* supposes that ssn_idx is valid (!= 0xFFF) */ 314 /* supposes that ssn_idx is valid (!= 0xFFF) */
306 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); 315 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
307 316
308 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); 317 iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
309 iwl_txq_ctx_deactivate(priv, txq_id); 318 iwl_txq_ctx_deactivate(priv, txq_id);
310 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 319 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
311 320
@@ -318,7 +327,7 @@ int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
318 */ 327 */
319void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask) 328void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
320{ 329{
321 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask); 330 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
322} 331}
323 332
324static inline int get_queue_from_ac(u16 ac) 333static inline int get_queue_from_ac(u16 ac)
@@ -991,7 +1000,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
991 tid_data = &priv->stations[sta_id].tid[tid]; 1000 tid_data = &priv->stations[sta_id].tid[tid];
992 *ssn = SEQ_TO_SN(tid_data->seq_number); 1001 *ssn = SEQ_TO_SN(tid_data->seq_number);
993 tid_data->agg.txq_id = txq_id; 1002 tid_data->agg.txq_id = txq_id;
994 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); 1003 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
995 spin_unlock_irqrestore(&priv->sta_lock, flags); 1004 spin_unlock_irqrestore(&priv->sta_lock, flags);
996 1005
997 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1006 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1224,8 +1233,9 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1224 memset(&info->status, 0, sizeof(info->status)); 1233 memset(&info->status, 0, sizeof(info->status));
1225 info->flags |= IEEE80211_TX_STAT_ACK; 1234 info->flags |= IEEE80211_TX_STAT_ACK;
1226 info->flags |= IEEE80211_TX_STAT_AMPDU; 1235 info->flags |= IEEE80211_TX_STAT_AMPDU;
1227 info->status.ampdu_ack_map = successes; 1236 info->status.ampdu_ack_len = successes;
1228 info->status.ampdu_ack_len = agg->frame_count; 1237 info->status.ampdu_ack_map = bitmap;
1238 info->status.ampdu_len = agg->frame_count;
1229 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1239 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1230 1240
1231 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); 1241 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 52ae157968b2..ae476c234a7c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -207,7 +207,7 @@ static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
207{ 207{
208 struct iwl_calib_xtal_freq_cmd cmd; 208 struct iwl_calib_xtal_freq_cmd cmd;
209 __le16 *xtal_calib = 209 __le16 *xtal_calib =
210 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); 210 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
211 211
212 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; 212 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
213 cmd.hdr.first_group = 0; 213 cmd.hdr.first_group = 0;
@@ -329,19 +329,19 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
329 329
330 spin_lock_irqsave(&priv->lock, flags); 330 spin_lock_irqsave(&priv->lock, flags);
331 331
332 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR); 332 priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
333 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET; 333 a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
334 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET; 334 for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
335 a += 4) 335 a += 4)
336 iwl_write_targ_mem(priv, a, 0); 336 iwl_write_targ_mem(priv, a, 0);
337 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; 337 for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
338 a += 4) 338 a += 4)
339 iwl_write_targ_mem(priv, a, 0); 339 iwl_write_targ_mem(priv, a, 0);
340 for (; a < priv->scd_base_addr + 340 for (; a < priv->scd_base_addr +
341 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) 341 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
342 iwl_write_targ_mem(priv, a, 0); 342 iwl_write_targ_mem(priv, a, 0);
343 343
344 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, 344 iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
345 priv->scd_bc_tbls.dma >> 10); 345 priv->scd_bc_tbls.dma >> 10);
346 346
347 /* Enable DMA channel */ 347 /* Enable DMA channel */
@@ -355,28 +355,28 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
355 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, 355 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
356 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 356 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
357 357
358 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, 358 iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
359 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num)); 359 IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
360 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); 360 iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
361 361
362 /* initiate the queues */ 362 /* initiate the queues */
363 for (i = 0; i < priv->hw_params.max_txq_num; i++) { 363 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
364 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0); 364 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
365 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); 365 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
366 iwl_write_targ_mem(priv, priv->scd_base_addr + 366 iwl_write_targ_mem(priv, priv->scd_base_addr +
367 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0); 367 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
368 iwl_write_targ_mem(priv, priv->scd_base_addr + 368 iwl_write_targ_mem(priv, priv->scd_base_addr +
369 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) + 369 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
370 sizeof(u32), 370 sizeof(u32),
371 ((SCD_WIN_SIZE << 371 ((SCD_WIN_SIZE <<
372 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 372 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
373 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 373 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
374 ((SCD_FRAME_LIMIT << 374 ((SCD_FRAME_LIMIT <<
375 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 375 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
376 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 376 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
377 } 377 }
378 378
379 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK, 379 iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
380 IWL_MASK(0, priv->hw_params.max_txq_num)); 380 IWL_MASK(0, priv->hw_params.max_txq_num));
381 381
382 /* Activate all Tx DMA/FIFO channels */ 382 /* Activate all Tx DMA/FIFO channels */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0b497d4bc659..a672d3379cfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2174,7 +2174,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2174 } 2174 }
2175 2175
2176 /* Configure Bluetooth device coexistence support */ 2176 /* Configure Bluetooth device coexistence support */
2177 iwl_send_bt_config(priv); 2177 priv->cfg->ops->hcmd->send_bt_config(priv);
2178 2178
2179 iwl_reset_run_time_calib(priv); 2179 iwl_reset_run_time_calib(priv);
2180 2180
@@ -2654,7 +2654,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
2654 2654
2655 /* Tell mac80211 our characteristics */ 2655 /* Tell mac80211 our characteristics */
2656 hw->flags = IEEE80211_HW_SIGNAL_DBM | 2656 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2657 IEEE80211_HW_NOISE_DBM |
2658 IEEE80211_HW_AMPDU_AGGREGATION | 2657 IEEE80211_HW_AMPDU_AGGREGATION |
2659 IEEE80211_HW_SPECTRUM_MGMT; 2658 IEEE80211_HW_SPECTRUM_MGMT;
2660 2659
@@ -3002,18 +3001,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3002 return 0; 3001 return 0;
3003} 3002}
3004 3003
3005static int iwl_mac_get_stats(struct ieee80211_hw *hw,
3006 struct ieee80211_low_level_stats *stats)
3007{
3008 struct iwl_priv *priv = hw->priv;
3009
3010 priv = hw->priv;
3011 IWL_DEBUG_MAC80211(priv, "enter\n");
3012 IWL_DEBUG_MAC80211(priv, "leave\n");
3013
3014 return 0;
3015}
3016
3017static void iwl_mac_sta_notify(struct ieee80211_hw *hw, 3004static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
3018 struct ieee80211_vif *vif, 3005 struct ieee80211_vif *vif,
3019 enum sta_notify_cmd cmd, 3006 enum sta_notify_cmd cmd,
@@ -3178,44 +3165,6 @@ static ssize_t store_tx_power(struct device *d,
3178 3165
3179static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3166static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
3180 3167
3181static ssize_t show_statistics(struct device *d,
3182 struct device_attribute *attr, char *buf)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 u32 size = sizeof(struct iwl_notif_statistics);
3186 u32 len = 0, ofs = 0;
3187 u8 *data = (u8 *)&priv->statistics;
3188 int rc = 0;
3189
3190 if (!iwl_is_alive(priv))
3191 return -EAGAIN;
3192
3193 mutex_lock(&priv->mutex);
3194 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3195 mutex_unlock(&priv->mutex);
3196
3197 if (rc) {
3198 len = sprintf(buf,
3199 "Error sending statistics request: 0x%08X\n", rc);
3200 return len;
3201 }
3202
3203 while (size && (PAGE_SIZE - len)) {
3204 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3205 PAGE_SIZE - len, 1);
3206 len = strlen(buf);
3207 if (PAGE_SIZE - len)
3208 buf[len++] = '\n';
3209
3210 ofs += 16;
3211 size -= min(size, 16U);
3212 }
3213
3214 return len;
3215}
3216
3217static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
3218
3219static ssize_t show_rts_ht_protection(struct device *d, 3168static ssize_t show_rts_ht_protection(struct device *d,
3220 struct device_attribute *attr, char *buf) 3169 struct device_attribute *attr, char *buf)
3221{ 3170{
@@ -3305,6 +3254,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3305 3254
3306 cancel_delayed_work_sync(&priv->init_alive_start); 3255 cancel_delayed_work_sync(&priv->init_alive_start);
3307 cancel_delayed_work(&priv->scan_check); 3256 cancel_delayed_work(&priv->scan_check);
3257 cancel_work_sync(&priv->start_internal_scan);
3308 cancel_delayed_work(&priv->alive_start); 3258 cancel_delayed_work(&priv->alive_start);
3309 cancel_work_sync(&priv->beacon_update); 3259 cancel_work_sync(&priv->beacon_update);
3310 del_timer_sync(&priv->statistics_periodic); 3260 del_timer_sync(&priv->statistics_periodic);
@@ -3400,11 +3350,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
3400 iwl_calib_free_results(priv); 3350 iwl_calib_free_results(priv);
3401 iwlcore_free_geos(priv); 3351 iwlcore_free_geos(priv);
3402 iwl_free_channel_map(priv); 3352 iwl_free_channel_map(priv);
3403 kfree(priv->scan); 3353 kfree(priv->scan_cmd);
3404} 3354}
3405 3355
3406static struct attribute *iwl_sysfs_entries[] = { 3356static struct attribute *iwl_sysfs_entries[] = {
3407 &dev_attr_statistics.attr,
3408 &dev_attr_temperature.attr, 3357 &dev_attr_temperature.attr,
3409 &dev_attr_tx_power.attr, 3358 &dev_attr_tx_power.attr,
3410 &dev_attr_rts_ht_protection.attr, 3359 &dev_attr_rts_ht_protection.attr,
@@ -3429,7 +3378,6 @@ static struct ieee80211_ops iwl_hw_ops = {
3429 .configure_filter = iwl_configure_filter, 3378 .configure_filter = iwl_configure_filter,
3430 .set_key = iwl_mac_set_key, 3379 .set_key = iwl_mac_set_key,
3431 .update_tkip_key = iwl_mac_update_tkip_key, 3380 .update_tkip_key = iwl_mac_update_tkip_key,
3432 .get_stats = iwl_mac_get_stats,
3433 .conf_tx = iwl_mac_conf_tx, 3381 .conf_tx = iwl_mac_conf_tx,
3434 .reset_tsf = iwl_mac_reset_tsf, 3382 .reset_tsf = iwl_mac_reset_tsf,
3435 .bss_info_changed = iwl_bss_info_changed, 3383 .bss_info_changed = iwl_bss_info_changed,
@@ -3835,7 +3783,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3835 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 3783 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
3836 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 3784 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
3837 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 3785 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
3838 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000i_g2_2agn_cfg)}, 3786
3787/* 6x00 Series Gen2 */
3788 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2_2agn_cfg)},
3789 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2_2agn_cfg)},
3790 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2_2agn_cfg)},
3791 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2_2agn_cfg)},
3839 3792
3840/* 6x50 WiFi/WiMax Series */ 3793/* 6x50 WiFi/WiMax Series */
3841 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, 3794 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5d3142287e14..cfee9994383e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -171,4 +171,7 @@ static inline bool iwl_is_tx_success(u32 status)
171 (status == TX_STATUS_DIRECT_DONE); 171 (status == TX_STATUS_DIRECT_DONE);
172} 172}
173 173
174/* scan */
175void iwlagn_request_scan(struct iwl_priv *priv);
176
174#endif /* __iwl_agn_h__ */ 177#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 0471c3f8713e..f1fd00b1a65d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
808 } 808 }
809 } 809 }
810 810
811 /*
812 * The above algorithm sometimes fails when the ucode
813 * reports 0 for all chains. It's not clear why that
814 * happens to start with, but it is then causing trouble
815 * because this can make us enable more chains than the
816 * hardware really has.
817 *
818 * To be safe, simply mask out any chains that we know
819 * are not on the device.
820 */
821 active_chains &= priv->hw_params.valid_rx_ant;
822
811 num_tx_chains = 0; 823 num_tx_chains = 0;
812 for (i = 0; i < NUM_RX_CHAINS; i++) { 824 for (i = 0; i < NUM_RX_CHAINS; i++) {
813 /* loops on all the bits of 825 /* loops on all the bits of
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index d830086ca195..0086019b7a15 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1443,7 +1443,7 @@ struct iwl4965_rx_mpdu_res_start {
1443 1443
1444/* 1: Ignore Bluetooth priority for this frame. 1444/* 1: Ignore Bluetooth priority for this frame.
1445 * 0: Delay Tx until Bluetooth device is done (normal usage). */ 1445 * 0: Delay Tx until Bluetooth device is done (normal usage). */
1446#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12) 1446#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
1447 1447
1448/* 1: uCode overrides sequence control field in MAC header. 1448/* 1: uCode overrides sequence control field in MAC header.
1449 * 0: Driver provides sequence control field in MAC header. 1449 * 0: Driver provides sequence control field in MAC header.
@@ -2663,7 +2663,9 @@ struct iwl_ssid_ie {
2663#define PROBE_OPTION_MAX_3945 4 2663#define PROBE_OPTION_MAX_3945 4
2664#define PROBE_OPTION_MAX 20 2664#define PROBE_OPTION_MAX 20
2665#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2665#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2666#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2666#define IWL_GOOD_CRC_TH_DISABLED 0
2667#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2668#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2667#define IWL_MAX_SCAN_SIZE 1024 2669#define IWL_MAX_SCAN_SIZE 1024
2668#define IWL_MAX_CMD_SIZE 4096 2670#define IWL_MAX_CMD_SIZE 4096
2669#define IWL_MAX_PROBE_REQUEST 200 2671#define IWL_MAX_PROBE_REQUEST 200
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2a89747d3473..4cdf4d3a9ddb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -829,19 +829,6 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
829} 829}
830 830
831/** 831/**
832 * iwl_is_monitor_mode - Determine if interface in monitor mode
833 *
834 * priv->iw_mode is set in add_interface, but add_interface is
835 * never called for monitor mode. The only way mac80211 informs us about
836 * monitor mode is through configuring filters (call to configure_filter).
837 */
838bool iwl_is_monitor_mode(struct iwl_priv *priv)
839{
840 return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
841}
842EXPORT_SYMBOL(iwl_is_monitor_mode);
843
844/**
845 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image 832 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
846 * 833 *
847 * Selects how many and which Rx receivers/antennas/chains to use. 834 * Selects how many and which Rx receivers/antennas/chains to use.
@@ -884,19 +871,6 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
884 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; 871 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
885 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; 872 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
886 873
887 /* copied from 'iwl_bg_request_scan()' */
888 /* Force use of chains B and C (0x6) for Rx
889 * Avoid A (0x1) for the device has off-channel reception on A-band.
890 * MIMO is not used here, but value is required */
891 if (iwl_is_monitor_mode(priv) &&
892 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
893 priv->cfg->off_channel_workaround) {
894 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
895 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
896 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
897 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
898 }
899
900 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); 874 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
901 875
902 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) 876 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -1480,7 +1454,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
1480} 1454}
1481EXPORT_SYMBOL(iwl_isr_legacy); 1455EXPORT_SYMBOL(iwl_isr_legacy);
1482 1456
1483int iwl_send_bt_config(struct iwl_priv *priv) 1457void iwl_send_bt_config(struct iwl_priv *priv)
1484{ 1458{
1485 struct iwl_bt_cmd bt_cmd = { 1459 struct iwl_bt_cmd bt_cmd = {
1486 .lead_time = BT_LEAD_TIME_DEF, 1460 .lead_time = BT_LEAD_TIME_DEF,
@@ -1497,8 +1471,9 @@ int iwl_send_bt_config(struct iwl_priv *priv)
1497 IWL_DEBUG_INFO(priv, "BT coex %s\n", 1471 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1498 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 1472 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1499 1473
1500 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, 1474 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1501 sizeof(struct iwl_bt_cmd), &bt_cmd); 1475 sizeof(struct iwl_bt_cmd), &bt_cmd))
1476 IWL_ERR(priv, "failed to send BT Coex Config\n");
1502} 1477}
1503EXPORT_SYMBOL(iwl_send_bt_config); 1478EXPORT_SYMBOL(iwl_send_bt_config);
1504 1479
@@ -1868,7 +1843,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
1868 iwlcore_commit_rxon(priv); 1843 iwlcore_commit_rxon(priv);
1869} 1844}
1870 1845
1871#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
1872void iwl_bss_info_changed(struct ieee80211_hw *hw, 1846void iwl_bss_info_changed(struct ieee80211_hw *hw,
1873 struct ieee80211_vif *vif, 1847 struct ieee80211_vif *vif,
1874 struct ieee80211_bss_conf *bss_conf, 1848 struct ieee80211_bss_conf *bss_conf,
@@ -1989,14 +1963,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1989 1963
1990 iwl_led_associate(priv); 1964 iwl_led_associate(priv);
1991 1965
1992 /*
1993 * We have just associated, don't start scan too early
1994 * leave time for EAPOL exchange to complete.
1995 *
1996 * XXX: do this in mac80211
1997 */
1998 priv->next_scan_jiffies = jiffies +
1999 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2000 if (!iwl_is_rfkill(priv)) 1966 if (!iwl_is_rfkill(priv))
2001 priv->cfg->ops->lib->post_associate(priv); 1967 priv->cfg->ops->lib->post_associate(priv);
2002 } else 1968 } else
@@ -2151,10 +2117,6 @@ EXPORT_SYMBOL(iwl_mac_remove_interface);
2151 2117
2152/** 2118/**
2153 * iwl_mac_config - mac80211 config callback 2119 * iwl_mac_config - mac80211 config callback
2154 *
2155 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2156 * be set inappropriately and the driver currently sets the hardware up to
2157 * use it whenever needed.
2158 */ 2120 */
2159int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) 2121int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2160{ 2122{
@@ -2383,11 +2345,11 @@ EXPORT_SYMBOL(iwl_free_txq_mem);
2383 2345
2384int iwl_send_wimax_coex(struct iwl_priv *priv) 2346int iwl_send_wimax_coex(struct iwl_priv *priv)
2385{ 2347{
2386 struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd); 2348 struct iwl_wimax_coex_cmd coex_cmd;
2387 2349
2388 if (priv->cfg->support_wimax_coexist) { 2350 if (priv->cfg->support_wimax_coexist) {
2389 /* UnMask wake up src at associated sleep */ 2351 /* UnMask wake up src at associated sleep */
2390 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK; 2352 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2391 2353
2392 /* UnMask wake up src at unassociated sleep */ 2354 /* UnMask wake up src at unassociated sleep */
2393 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK; 2355 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
@@ -2802,7 +2764,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
2802 */ 2764 */
2803 IWL_DEBUG_INFO(priv, "perform radio reset.\n"); 2765 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
2804 iwl_internal_short_hw_scan(priv); 2766 iwl_internal_short_hw_scan(priv);
2805 return;
2806} 2767}
2807 2768
2808 2769
@@ -2970,6 +2931,12 @@ int iwl_pci_resume(struct pci_dev *pdev)
2970 struct iwl_priv *priv = pci_get_drvdata(pdev); 2931 struct iwl_priv *priv = pci_get_drvdata(pdev);
2971 int ret; 2932 int ret;
2972 2933
2934 /*
2935 * We disable the RETRY_TIMEOUT register (0x41) to keep
2936 * PCI Tx retries from interfering with C3 CPU state.
2937 */
2938 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2939
2973 pci_set_power_state(pdev, PCI_D0); 2940 pci_set_power_state(pdev, PCI_D0);
2974 ret = pci_enable_device(pdev); 2941 ret = pci_enable_device(pdev);
2975 if (ret) 2942 if (ret)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index d89755f5031a..727360944859 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,6 +90,7 @@ struct iwl_hcmd_ops {
90 int (*commit_rxon)(struct iwl_priv *priv); 90 int (*commit_rxon)(struct iwl_priv *priv);
91 void (*set_rxon_chain)(struct iwl_priv *priv); 91 void (*set_rxon_chain)(struct iwl_priv *priv);
92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant); 92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
93 void (*send_bt_config)(struct iwl_priv *priv);
93}; 94};
94 95
95struct iwl_hcmd_utils_ops { 96struct iwl_hcmd_utils_ops {
@@ -105,6 +106,7 @@ struct iwl_hcmd_utils_ops {
105 __le32 *tx_flags); 106 __le32 *tx_flags);
106 int (*calc_rssi)(struct iwl_priv *priv, 107 int (*calc_rssi)(struct iwl_priv *priv,
107 struct iwl_rx_phy_res *rx_resp); 108 struct iwl_rx_phy_res *rx_resp);
109 void (*request_scan)(struct iwl_priv *priv);
108}; 110};
109 111
110struct iwl_apm_ops { 112struct iwl_apm_ops {
@@ -114,6 +116,15 @@ struct iwl_apm_ops {
114 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 116 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
115}; 117};
116 118
119struct iwl_debugfs_ops {
120 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
121 size_t count, loff_t *ppos);
122 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos);
124 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos);
126};
127
117struct iwl_temp_ops { 128struct iwl_temp_ops {
118 void (*temperature)(struct iwl_priv *priv); 129 void (*temperature)(struct iwl_priv *priv);
119 void (*set_ct_kill)(struct iwl_priv *priv); 130 void (*set_ct_kill)(struct iwl_priv *priv);
@@ -199,6 +210,7 @@ struct iwl_lib_ops {
199 /* check for ack health */ 210 /* check for ack health */
200 bool (*check_ack_health)(struct iwl_priv *priv, 211 bool (*check_ack_health)(struct iwl_priv *priv,
201 struct iwl_rx_packet *pkt); 212 struct iwl_rx_packet *pkt);
213 struct iwl_debugfs_ops debugfs_ops;
202}; 214};
203 215
204struct iwl_led_ops { 216struct iwl_led_ops {
@@ -306,8 +318,8 @@ struct iwl_cfg {
306 /* timer period for monitor the driver queues */ 318 /* timer period for monitor the driver queues */
307 u32 monitor_recover_period; 319 u32 monitor_recover_period;
308 bool temperature_kelvin; 320 bool temperature_kelvin;
309 bool off_channel_workaround;
310 u32 max_event_log_size; 321 u32 max_event_log_size;
322 u8 scan_antennas[IEEE80211_NUM_BANDS];
311}; 323};
312 324
313/*************************** 325/***************************
@@ -339,7 +351,6 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
339 unsigned int changed_flags, 351 unsigned int changed_flags,
340 unsigned int *total_flags, u64 multicast); 352 unsigned int *total_flags, u64 multicast);
341int iwl_set_hw_params(struct iwl_priv *priv); 353int iwl_set_hw_params(struct iwl_priv *priv);
342bool iwl_is_monitor_mode(struct iwl_priv *priv);
343void iwl_post_associate(struct iwl_priv *priv); 354void iwl_post_associate(struct iwl_priv *priv);
344void iwl_bss_info_changed(struct ieee80211_hw *hw, 355void iwl_bss_info_changed(struct ieee80211_hw *hw,
345 struct ieee80211_vif *vif, 356 struct ieee80211_vif *vif,
@@ -501,8 +512,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
501void iwl_init_scan_params(struct iwl_priv *priv); 512void iwl_init_scan_params(struct iwl_priv *priv);
502int iwl_scan_cancel(struct iwl_priv *priv); 513int iwl_scan_cancel(struct iwl_priv *priv);
503int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 514int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
504int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); 515int iwl_mac_hw_scan(struct ieee80211_hw *hw,
505int iwl_internal_short_hw_scan(struct iwl_priv *priv); 516 struct ieee80211_vif *vif,
517 struct cfg80211_scan_request *req);
518void iwl_internal_short_hw_scan(struct iwl_priv *priv);
506int iwl_force_reset(struct iwl_priv *priv, int mode); 519int iwl_force_reset(struct iwl_priv *priv, int mode);
507u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 520u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
508 const u8 *ie, int ie_len, int left); 521 const u8 *ie, int ie_len, int left);
@@ -526,6 +539,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
526#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ 539#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
527#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ 540#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
528 541
542#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
529 543
530/******************************************************************************* 544/*******************************************************************************
531 * Calibrations - implemented in iwl-calib.c 545 * Calibrations - implemented in iwl-calib.c
@@ -665,7 +679,7 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
665} 679}
666 680
667extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); 681extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
668extern int iwl_send_bt_config(struct iwl_priv *priv); 682extern void iwl_send_bt_config(struct iwl_priv *priv);
669extern int iwl_send_statistics_request(struct iwl_priv *priv, 683extern int iwl_send_statistics_request(struct iwl_priv *priv,
670 u8 flags, bool clear); 684 u8 flags, bool clear);
671extern int iwl_verify_ucode(struct iwl_priv *priv); 685extern int iwl_verify_ucode(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 808b7146bead..254c35ae8b38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
298#define CSR_HW_REV_TYPE_1000 (0x0000060) 298#define CSR_HW_REV_TYPE_1000 (0x0000060)
299#define CSR_HW_REV_TYPE_6x00 (0x0000070) 299#define CSR_HW_REV_TYPE_6x00 (0x0000070)
300#define CSR_HW_REV_TYPE_6x50 (0x0000080) 300#define CSR_HW_REV_TYPE_6x50 (0x0000080)
301#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
301#define CSR_HW_REV_TYPE_NONE (0x00000F0) 302#define CSR_HW_REV_TYPE_NONE (0x00000F0)
302 303
303/* EEPROM REG */ 304/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 1c7b53d511c7..5c2bcef5df0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -78,6 +78,8 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
78#ifdef CONFIG_IWLWIFI_DEBUGFS 78#ifdef CONFIG_IWLWIFI_DEBUGFS
79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); 79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_dbgfs_unregister(struct iwl_priv *priv); 80void iwl_dbgfs_unregister(struct iwl_priv *priv);
81extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
82 int bufsz);
81#else 83#else
82static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 84static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
83{ 85{
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 607a91f3eb6b..4aabb542fcbe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,6 +106,26 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
106 .open = iwl_dbgfs_open_file_generic, \ 106 .open = iwl_dbgfs_open_file_generic, \
107}; 107};
108 108
109int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
110{
111 int p = 0;
112
113 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
114 le32_to_cpu(priv->statistics.flag));
115 if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
116 p += scnprintf(buf + p, bufsz - p,
117 "\tStatistics have been cleared\n");
118 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
119 (le32_to_cpu(priv->statistics.flag) &
120 UCODE_STATISTICS_FREQUENCY_MSK)
121 ? "2.4 GHz" : "5.2 GHz");
122 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
123 (le32_to_cpu(priv->statistics.flag) &
124 UCODE_STATISTICS_NARROW_BAND_MSK)
125 ? "enabled" : "disabled");
126 return p;
127}
128EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
109 129
110static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, 130static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
111 char __user *user_buf, 131 char __user *user_buf,
@@ -1034,474 +1054,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1034 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1054 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1035} 1055}
1036 1056
1037static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
1038 int bufsz)
1039{
1040 int p = 0;
1041
1042 p += scnprintf(buf + p, bufsz - p,
1043 "Statistics Flag(0x%X):\n",
1044 le32_to_cpu(priv->statistics.flag));
1045 if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
1046 p += scnprintf(buf + p, bufsz - p,
1047 "\tStatistics have been cleared\n");
1048 p += scnprintf(buf + p, bufsz - p,
1049 "\tOperational Frequency: %s\n",
1050 (le32_to_cpu(priv->statistics.flag) &
1051 UCODE_STATISTICS_FREQUENCY_MSK)
1052 ? "2.4 GHz" : "5.2 GHz");
1053 p += scnprintf(buf + p, bufsz - p,
1054 "\tTGj Narrow Band: %s\n",
1055 (le32_to_cpu(priv->statistics.flag) &
1056 UCODE_STATISTICS_NARROW_BAND_MSK)
1057 ? "enabled" : "disabled");
1058 return p;
1059}
1060
1061static const char ucode_stats_header[] =
1062 "%-32s current acumulative delta max\n";
1063static const char ucode_stats_short_format[] =
1064 " %-30s %10u\n";
1065static const char ucode_stats_format[] =
1066 " %-30s %10u %10u %10u %10u\n";
1067
1068static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, 1057static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1069 char __user *user_buf, 1058 char __user *user_buf,
1070 size_t count, loff_t *ppos) 1059 size_t count, loff_t *ppos)
1071{ 1060{
1072 struct iwl_priv *priv = file->private_data; 1061 struct iwl_priv *priv = file->private_data;
1073 int pos = 0; 1062 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
1074 char *buf; 1063 user_buf, count, ppos);
1075 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
1076 sizeof(struct statistics_rx_non_phy) * 40 +
1077 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
1078 ssize_t ret;
1079 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
1080 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
1081 struct statistics_rx_non_phy *general, *accum_general;
1082 struct statistics_rx_non_phy *delta_general, *max_general;
1083 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1084
1085 if (!iwl_is_alive(priv))
1086 return -EAGAIN;
1087
1088 buf = kzalloc(bufsz, GFP_KERNEL);
1089 if (!buf) {
1090 IWL_ERR(priv, "Can not allocate Buffer\n");
1091 return -ENOMEM;
1092 }
1093
1094 /* the statistic information display here is based on
1095 * the last statistics notification from uCode
1096 * might not reflect the current uCode activity
1097 */
1098 ofdm = &priv->statistics.rx.ofdm;
1099 cck = &priv->statistics.rx.cck;
1100 general = &priv->statistics.rx.general;
1101 ht = &priv->statistics.rx.ofdm_ht;
1102 accum_ofdm = &priv->accum_statistics.rx.ofdm;
1103 accum_cck = &priv->accum_statistics.rx.cck;
1104 accum_general = &priv->accum_statistics.rx.general;
1105 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1106 delta_ofdm = &priv->delta_statistics.rx.ofdm;
1107 delta_cck = &priv->delta_statistics.rx.cck;
1108 delta_general = &priv->delta_statistics.rx.general;
1109 delta_ht = &priv->delta_statistics.rx.ofdm_ht;
1110 max_ofdm = &priv->max_delta.rx.ofdm;
1111 max_cck = &priv->max_delta.rx.cck;
1112 max_general = &priv->max_delta.rx.general;
1113 max_ht = &priv->max_delta.rx.ofdm_ht;
1114
1115 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1116 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1117 "Statistics_Rx - OFDM:");
1118 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1119 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
1120 accum_ofdm->ina_cnt,
1121 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
1122 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1123 "fina_cnt:",
1124 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
1125 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
1126 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1127 "plcp_err:",
1128 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
1129 delta_ofdm->plcp_err, max_ofdm->plcp_err);
1130 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1131 "crc32_err:",
1132 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
1133 delta_ofdm->crc32_err, max_ofdm->crc32_err);
1134 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1135 "overrun_err:",
1136 le32_to_cpu(ofdm->overrun_err),
1137 accum_ofdm->overrun_err,
1138 delta_ofdm->overrun_err, max_ofdm->overrun_err);
1139 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1140 "early_overrun_err:",
1141 le32_to_cpu(ofdm->early_overrun_err),
1142 accum_ofdm->early_overrun_err,
1143 delta_ofdm->early_overrun_err,
1144 max_ofdm->early_overrun_err);
1145 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1146 "crc32_good:",
1147 le32_to_cpu(ofdm->crc32_good),
1148 accum_ofdm->crc32_good,
1149 delta_ofdm->crc32_good, max_ofdm->crc32_good);
1150 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1151 "false_alarm_cnt:",
1152 le32_to_cpu(ofdm->false_alarm_cnt),
1153 accum_ofdm->false_alarm_cnt,
1154 delta_ofdm->false_alarm_cnt,
1155 max_ofdm->false_alarm_cnt);
1156 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1157 "fina_sync_err_cnt:",
1158 le32_to_cpu(ofdm->fina_sync_err_cnt),
1159 accum_ofdm->fina_sync_err_cnt,
1160 delta_ofdm->fina_sync_err_cnt,
1161 max_ofdm->fina_sync_err_cnt);
1162 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1163 "sfd_timeout:",
1164 le32_to_cpu(ofdm->sfd_timeout),
1165 accum_ofdm->sfd_timeout,
1166 delta_ofdm->sfd_timeout,
1167 max_ofdm->sfd_timeout);
1168 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1169 "fina_timeout:",
1170 le32_to_cpu(ofdm->fina_timeout),
1171 accum_ofdm->fina_timeout,
1172 delta_ofdm->fina_timeout,
1173 max_ofdm->fina_timeout);
1174 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1175 "unresponded_rts:",
1176 le32_to_cpu(ofdm->unresponded_rts),
1177 accum_ofdm->unresponded_rts,
1178 delta_ofdm->unresponded_rts,
1179 max_ofdm->unresponded_rts);
1180 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1181 "rxe_frame_lmt_ovrun:",
1182 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1183 accum_ofdm->rxe_frame_limit_overrun,
1184 delta_ofdm->rxe_frame_limit_overrun,
1185 max_ofdm->rxe_frame_limit_overrun);
1186 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1187 "sent_ack_cnt:",
1188 le32_to_cpu(ofdm->sent_ack_cnt),
1189 accum_ofdm->sent_ack_cnt,
1190 delta_ofdm->sent_ack_cnt,
1191 max_ofdm->sent_ack_cnt);
1192 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1193 "sent_cts_cnt:",
1194 le32_to_cpu(ofdm->sent_cts_cnt),
1195 accum_ofdm->sent_cts_cnt,
1196 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
1197 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1198 "sent_ba_rsp_cnt:",
1199 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1200 accum_ofdm->sent_ba_rsp_cnt,
1201 delta_ofdm->sent_ba_rsp_cnt,
1202 max_ofdm->sent_ba_rsp_cnt);
1203 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1204 "dsp_self_kill:",
1205 le32_to_cpu(ofdm->dsp_self_kill),
1206 accum_ofdm->dsp_self_kill,
1207 delta_ofdm->dsp_self_kill,
1208 max_ofdm->dsp_self_kill);
1209 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1210 "mh_format_err:",
1211 le32_to_cpu(ofdm->mh_format_err),
1212 accum_ofdm->mh_format_err,
1213 delta_ofdm->mh_format_err,
1214 max_ofdm->mh_format_err);
1215 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1216 "re_acq_main_rssi_sum:",
1217 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1218 accum_ofdm->re_acq_main_rssi_sum,
1219 delta_ofdm->re_acq_main_rssi_sum,
1220 max_ofdm->re_acq_main_rssi_sum);
1221
1222 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1223 "Statistics_Rx - CCK:");
1224 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1225 "ina_cnt:",
1226 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
1227 delta_cck->ina_cnt, max_cck->ina_cnt);
1228 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1229 "fina_cnt:",
1230 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
1231 delta_cck->fina_cnt, max_cck->fina_cnt);
1232 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1233 "plcp_err:",
1234 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
1235 delta_cck->plcp_err, max_cck->plcp_err);
1236 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1237 "crc32_err:",
1238 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
1239 delta_cck->crc32_err, max_cck->crc32_err);
1240 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1241 "overrun_err:",
1242 le32_to_cpu(cck->overrun_err),
1243 accum_cck->overrun_err,
1244 delta_cck->overrun_err, max_cck->overrun_err);
1245 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1246 "early_overrun_err:",
1247 le32_to_cpu(cck->early_overrun_err),
1248 accum_cck->early_overrun_err,
1249 delta_cck->early_overrun_err,
1250 max_cck->early_overrun_err);
1251 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1252 "crc32_good:",
1253 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
1254 delta_cck->crc32_good,
1255 max_cck->crc32_good);
1256 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1257 "false_alarm_cnt:",
1258 le32_to_cpu(cck->false_alarm_cnt),
1259 accum_cck->false_alarm_cnt,
1260 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
1261 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1262 "fina_sync_err_cnt:",
1263 le32_to_cpu(cck->fina_sync_err_cnt),
1264 accum_cck->fina_sync_err_cnt,
1265 delta_cck->fina_sync_err_cnt,
1266 max_cck->fina_sync_err_cnt);
1267 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1268 "sfd_timeout:",
1269 le32_to_cpu(cck->sfd_timeout),
1270 accum_cck->sfd_timeout,
1271 delta_cck->sfd_timeout, max_cck->sfd_timeout);
1272 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1273 "fina_timeout:",
1274 le32_to_cpu(cck->fina_timeout),
1275 accum_cck->fina_timeout,
1276 delta_cck->fina_timeout, max_cck->fina_timeout);
1277 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1278 "unresponded_rts:",
1279 le32_to_cpu(cck->unresponded_rts),
1280 accum_cck->unresponded_rts,
1281 delta_cck->unresponded_rts,
1282 max_cck->unresponded_rts);
1283 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1284 "rxe_frame_lmt_ovrun:",
1285 le32_to_cpu(cck->rxe_frame_limit_overrun),
1286 accum_cck->rxe_frame_limit_overrun,
1287 delta_cck->rxe_frame_limit_overrun,
1288 max_cck->rxe_frame_limit_overrun);
1289 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1290 "sent_ack_cnt:",
1291 le32_to_cpu(cck->sent_ack_cnt),
1292 accum_cck->sent_ack_cnt,
1293 delta_cck->sent_ack_cnt,
1294 max_cck->sent_ack_cnt);
1295 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1296 "sent_cts_cnt:",
1297 le32_to_cpu(cck->sent_cts_cnt),
1298 accum_cck->sent_cts_cnt,
1299 delta_cck->sent_cts_cnt,
1300 max_cck->sent_cts_cnt);
1301 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1302 "sent_ba_rsp_cnt:",
1303 le32_to_cpu(cck->sent_ba_rsp_cnt),
1304 accum_cck->sent_ba_rsp_cnt,
1305 delta_cck->sent_ba_rsp_cnt,
1306 max_cck->sent_ba_rsp_cnt);
1307 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1308 "dsp_self_kill:",
1309 le32_to_cpu(cck->dsp_self_kill),
1310 accum_cck->dsp_self_kill,
1311 delta_cck->dsp_self_kill,
1312 max_cck->dsp_self_kill);
1313 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1314 "mh_format_err:",
1315 le32_to_cpu(cck->mh_format_err),
1316 accum_cck->mh_format_err,
1317 delta_cck->mh_format_err, max_cck->mh_format_err);
1318 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1319 "re_acq_main_rssi_sum:",
1320 le32_to_cpu(cck->re_acq_main_rssi_sum),
1321 accum_cck->re_acq_main_rssi_sum,
1322 delta_cck->re_acq_main_rssi_sum,
1323 max_cck->re_acq_main_rssi_sum);
1324
1325 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1326 "Statistics_Rx - GENERAL:");
1327 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1328 "bogus_cts:",
1329 le32_to_cpu(general->bogus_cts),
1330 accum_general->bogus_cts,
1331 delta_general->bogus_cts, max_general->bogus_cts);
1332 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1333 "bogus_ack:",
1334 le32_to_cpu(general->bogus_ack),
1335 accum_general->bogus_ack,
1336 delta_general->bogus_ack, max_general->bogus_ack);
1337 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1338 "non_bssid_frames:",
1339 le32_to_cpu(general->non_bssid_frames),
1340 accum_general->non_bssid_frames,
1341 delta_general->non_bssid_frames,
1342 max_general->non_bssid_frames);
1343 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1344 "filtered_frames:",
1345 le32_to_cpu(general->filtered_frames),
1346 accum_general->filtered_frames,
1347 delta_general->filtered_frames,
1348 max_general->filtered_frames);
1349 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1350 "non_channel_beacons:",
1351 le32_to_cpu(general->non_channel_beacons),
1352 accum_general->non_channel_beacons,
1353 delta_general->non_channel_beacons,
1354 max_general->non_channel_beacons);
1355 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1356 "channel_beacons:",
1357 le32_to_cpu(general->channel_beacons),
1358 accum_general->channel_beacons,
1359 delta_general->channel_beacons,
1360 max_general->channel_beacons);
1361 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1362 "num_missed_bcon:",
1363 le32_to_cpu(general->num_missed_bcon),
1364 accum_general->num_missed_bcon,
1365 delta_general->num_missed_bcon,
1366 max_general->num_missed_bcon);
1367 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1368 "adc_rx_saturation_time:",
1369 le32_to_cpu(general->adc_rx_saturation_time),
1370 accum_general->adc_rx_saturation_time,
1371 delta_general->adc_rx_saturation_time,
1372 max_general->adc_rx_saturation_time);
1373 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1374 "ina_detect_search_tm:",
1375 le32_to_cpu(general->ina_detection_search_time),
1376 accum_general->ina_detection_search_time,
1377 delta_general->ina_detection_search_time,
1378 max_general->ina_detection_search_time);
1379 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1380 "beacon_silence_rssi_a:",
1381 le32_to_cpu(general->beacon_silence_rssi_a),
1382 accum_general->beacon_silence_rssi_a,
1383 delta_general->beacon_silence_rssi_a,
1384 max_general->beacon_silence_rssi_a);
1385 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1386 "beacon_silence_rssi_b:",
1387 le32_to_cpu(general->beacon_silence_rssi_b),
1388 accum_general->beacon_silence_rssi_b,
1389 delta_general->beacon_silence_rssi_b,
1390 max_general->beacon_silence_rssi_b);
1391 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1392 "beacon_silence_rssi_c:",
1393 le32_to_cpu(general->beacon_silence_rssi_c),
1394 accum_general->beacon_silence_rssi_c,
1395 delta_general->beacon_silence_rssi_c,
1396 max_general->beacon_silence_rssi_c);
1397 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1398 "interference_data_flag:",
1399 le32_to_cpu(general->interference_data_flag),
1400 accum_general->interference_data_flag,
1401 delta_general->interference_data_flag,
1402 max_general->interference_data_flag);
1403 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1404 "channel_load:",
1405 le32_to_cpu(general->channel_load),
1406 accum_general->channel_load,
1407 delta_general->channel_load,
1408 max_general->channel_load);
1409 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1410 "dsp_false_alarms:",
1411 le32_to_cpu(general->dsp_false_alarms),
1412 accum_general->dsp_false_alarms,
1413 delta_general->dsp_false_alarms,
1414 max_general->dsp_false_alarms);
1415 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1416 "beacon_rssi_a:",
1417 le32_to_cpu(general->beacon_rssi_a),
1418 accum_general->beacon_rssi_a,
1419 delta_general->beacon_rssi_a,
1420 max_general->beacon_rssi_a);
1421 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1422 "beacon_rssi_b:",
1423 le32_to_cpu(general->beacon_rssi_b),
1424 accum_general->beacon_rssi_b,
1425 delta_general->beacon_rssi_b,
1426 max_general->beacon_rssi_b);
1427 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1428 "beacon_rssi_c:",
1429 le32_to_cpu(general->beacon_rssi_c),
1430 accum_general->beacon_rssi_c,
1431 delta_general->beacon_rssi_c,
1432 max_general->beacon_rssi_c);
1433 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1434 "beacon_energy_a:",
1435 le32_to_cpu(general->beacon_energy_a),
1436 accum_general->beacon_energy_a,
1437 delta_general->beacon_energy_a,
1438 max_general->beacon_energy_a);
1439 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1440 "beacon_energy_b:",
1441 le32_to_cpu(general->beacon_energy_b),
1442 accum_general->beacon_energy_b,
1443 delta_general->beacon_energy_b,
1444 max_general->beacon_energy_b);
1445 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1446 "beacon_energy_c:",
1447 le32_to_cpu(general->beacon_energy_c),
1448 accum_general->beacon_energy_c,
1449 delta_general->beacon_energy_c,
1450 max_general->beacon_energy_c);
1451
1452 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1453 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1454 "Statistics_Rx - OFDM_HT:");
1455 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1456 "plcp_err:",
1457 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1458 delta_ht->plcp_err, max_ht->plcp_err);
1459 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1460 "overrun_err:",
1461 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1462 delta_ht->overrun_err, max_ht->overrun_err);
1463 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1464 "early_overrun_err:",
1465 le32_to_cpu(ht->early_overrun_err),
1466 accum_ht->early_overrun_err,
1467 delta_ht->early_overrun_err,
1468 max_ht->early_overrun_err);
1469 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1470 "crc32_good:",
1471 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1472 delta_ht->crc32_good, max_ht->crc32_good);
1473 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1474 "crc32_err:",
1475 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1476 delta_ht->crc32_err, max_ht->crc32_err);
1477 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1478 "mh_format_err:",
1479 le32_to_cpu(ht->mh_format_err),
1480 accum_ht->mh_format_err,
1481 delta_ht->mh_format_err, max_ht->mh_format_err);
1482 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1483 "agg_crc32_good:",
1484 le32_to_cpu(ht->agg_crc32_good),
1485 accum_ht->agg_crc32_good,
1486 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1487 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1488 "agg_mpdu_cnt:",
1489 le32_to_cpu(ht->agg_mpdu_cnt),
1490 accum_ht->agg_mpdu_cnt,
1491 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1492 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1493 "agg_cnt:",
1494 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1495 delta_ht->agg_cnt, max_ht->agg_cnt);
1496 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1497 "unsupport_mcs:",
1498 le32_to_cpu(ht->unsupport_mcs),
1499 accum_ht->unsupport_mcs,
1500 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1501
1502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1503 kfree(buf);
1504 return ret;
1505} 1064}
1506 1065
1507static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, 1066static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
@@ -1509,173 +1068,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1509 size_t count, loff_t *ppos) 1068 size_t count, loff_t *ppos)
1510{ 1069{
1511 struct iwl_priv *priv = file->private_data; 1070 struct iwl_priv *priv = file->private_data;
1512 int pos = 0; 1071 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
1513 char *buf; 1072 user_buf, count, ppos);
1514 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
1515 ssize_t ret;
1516 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1517
1518 if (!iwl_is_alive(priv))
1519 return -EAGAIN;
1520
1521 buf = kzalloc(bufsz, GFP_KERNEL);
1522 if (!buf) {
1523 IWL_ERR(priv, "Can not allocate Buffer\n");
1524 return -ENOMEM;
1525 }
1526
1527 /* the statistic information display here is based on
1528 * the last statistics notification from uCode
1529 * might not reflect the current uCode activity
1530 */
1531 tx = &priv->statistics.tx;
1532 accum_tx = &priv->accum_statistics.tx;
1533 delta_tx = &priv->delta_statistics.tx;
1534 max_tx = &priv->max_delta.tx;
1535 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1536 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1537 "Statistics_Tx:");
1538 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1539 "preamble:",
1540 le32_to_cpu(tx->preamble_cnt),
1541 accum_tx->preamble_cnt,
1542 delta_tx->preamble_cnt, max_tx->preamble_cnt);
1543 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1544 "rx_detected_cnt:",
1545 le32_to_cpu(tx->rx_detected_cnt),
1546 accum_tx->rx_detected_cnt,
1547 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
1548 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1549 "bt_prio_defer_cnt:",
1550 le32_to_cpu(tx->bt_prio_defer_cnt),
1551 accum_tx->bt_prio_defer_cnt,
1552 delta_tx->bt_prio_defer_cnt,
1553 max_tx->bt_prio_defer_cnt);
1554 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1555 "bt_prio_kill_cnt:",
1556 le32_to_cpu(tx->bt_prio_kill_cnt),
1557 accum_tx->bt_prio_kill_cnt,
1558 delta_tx->bt_prio_kill_cnt,
1559 max_tx->bt_prio_kill_cnt);
1560 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1561 "few_bytes_cnt:",
1562 le32_to_cpu(tx->few_bytes_cnt),
1563 accum_tx->few_bytes_cnt,
1564 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
1565 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1566 "cts_timeout:",
1567 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
1568 delta_tx->cts_timeout, max_tx->cts_timeout);
1569 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1570 "ack_timeout:",
1571 le32_to_cpu(tx->ack_timeout),
1572 accum_tx->ack_timeout,
1573 delta_tx->ack_timeout, max_tx->ack_timeout);
1574 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1575 "expected_ack_cnt:",
1576 le32_to_cpu(tx->expected_ack_cnt),
1577 accum_tx->expected_ack_cnt,
1578 delta_tx->expected_ack_cnt,
1579 max_tx->expected_ack_cnt);
1580 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1581 "actual_ack_cnt:",
1582 le32_to_cpu(tx->actual_ack_cnt),
1583 accum_tx->actual_ack_cnt,
1584 delta_tx->actual_ack_cnt,
1585 max_tx->actual_ack_cnt);
1586 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1587 "dump_msdu_cnt:",
1588 le32_to_cpu(tx->dump_msdu_cnt),
1589 accum_tx->dump_msdu_cnt,
1590 delta_tx->dump_msdu_cnt,
1591 max_tx->dump_msdu_cnt);
1592 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1593 "abort_nxt_frame_mismatch:",
1594 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1595 accum_tx->burst_abort_next_frame_mismatch_cnt,
1596 delta_tx->burst_abort_next_frame_mismatch_cnt,
1597 max_tx->burst_abort_next_frame_mismatch_cnt);
1598 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1599 "abort_missing_nxt_frame:",
1600 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1601 accum_tx->burst_abort_missing_next_frame_cnt,
1602 delta_tx->burst_abort_missing_next_frame_cnt,
1603 max_tx->burst_abort_missing_next_frame_cnt);
1604 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1605 "cts_timeout_collision:",
1606 le32_to_cpu(tx->cts_timeout_collision),
1607 accum_tx->cts_timeout_collision,
1608 delta_tx->cts_timeout_collision,
1609 max_tx->cts_timeout_collision);
1610 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1611 "ack_ba_timeout_collision:",
1612 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1613 accum_tx->ack_or_ba_timeout_collision,
1614 delta_tx->ack_or_ba_timeout_collision,
1615 max_tx->ack_or_ba_timeout_collision);
1616 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1617 "agg ba_timeout:",
1618 le32_to_cpu(tx->agg.ba_timeout),
1619 accum_tx->agg.ba_timeout,
1620 delta_tx->agg.ba_timeout,
1621 max_tx->agg.ba_timeout);
1622 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1623 "agg ba_resched_frames:",
1624 le32_to_cpu(tx->agg.ba_reschedule_frames),
1625 accum_tx->agg.ba_reschedule_frames,
1626 delta_tx->agg.ba_reschedule_frames,
1627 max_tx->agg.ba_reschedule_frames);
1628 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1629 "agg scd_query_agg_frame:",
1630 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1631 accum_tx->agg.scd_query_agg_frame_cnt,
1632 delta_tx->agg.scd_query_agg_frame_cnt,
1633 max_tx->agg.scd_query_agg_frame_cnt);
1634 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1635 "agg scd_query_no_agg:",
1636 le32_to_cpu(tx->agg.scd_query_no_agg),
1637 accum_tx->agg.scd_query_no_agg,
1638 delta_tx->agg.scd_query_no_agg,
1639 max_tx->agg.scd_query_no_agg);
1640 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1641 "agg scd_query_agg:",
1642 le32_to_cpu(tx->agg.scd_query_agg),
1643 accum_tx->agg.scd_query_agg,
1644 delta_tx->agg.scd_query_agg,
1645 max_tx->agg.scd_query_agg);
1646 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1647 "agg scd_query_mismatch:",
1648 le32_to_cpu(tx->agg.scd_query_mismatch),
1649 accum_tx->agg.scd_query_mismatch,
1650 delta_tx->agg.scd_query_mismatch,
1651 max_tx->agg.scd_query_mismatch);
1652 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1653 "agg frame_not_ready:",
1654 le32_to_cpu(tx->agg.frame_not_ready),
1655 accum_tx->agg.frame_not_ready,
1656 delta_tx->agg.frame_not_ready,
1657 max_tx->agg.frame_not_ready);
1658 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1659 "agg underrun:",
1660 le32_to_cpu(tx->agg.underrun),
1661 accum_tx->agg.underrun,
1662 delta_tx->agg.underrun, max_tx->agg.underrun);
1663 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1664 "agg bt_prio_kill:",
1665 le32_to_cpu(tx->agg.bt_prio_kill),
1666 accum_tx->agg.bt_prio_kill,
1667 delta_tx->agg.bt_prio_kill,
1668 max_tx->agg.bt_prio_kill);
1669 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1670 "agg rx_ba_rsp_cnt:",
1671 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1672 accum_tx->agg.rx_ba_rsp_cnt,
1673 delta_tx->agg.rx_ba_rsp_cnt,
1674 max_tx->agg.rx_ba_rsp_cnt);
1675
1676 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1677 kfree(buf);
1678 return ret;
1679} 1073}
1680 1074
1681static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, 1075static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1683,107 +1077,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1683 size_t count, loff_t *ppos) 1077 size_t count, loff_t *ppos)
1684{ 1078{
1685 struct iwl_priv *priv = file->private_data; 1079 struct iwl_priv *priv = file->private_data;
1686 int pos = 0; 1080 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
1687 char *buf; 1081 user_buf, count, ppos);
1688 int bufsz = sizeof(struct statistics_general) * 10 + 300;
1689 ssize_t ret;
1690 struct statistics_general *general, *accum_general;
1691 struct statistics_general *delta_general, *max_general;
1692 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1693 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1694
1695 if (!iwl_is_alive(priv))
1696 return -EAGAIN;
1697
1698 buf = kzalloc(bufsz, GFP_KERNEL);
1699 if (!buf) {
1700 IWL_ERR(priv, "Can not allocate Buffer\n");
1701 return -ENOMEM;
1702 }
1703
1704 /* the statistic information display here is based on
1705 * the last statistics notification from uCode
1706 * might not reflect the current uCode activity
1707 */
1708 general = &priv->statistics.general;
1709 dbg = &priv->statistics.general.dbg;
1710 div = &priv->statistics.general.div;
1711 accum_general = &priv->accum_statistics.general;
1712 delta_general = &priv->delta_statistics.general;
1713 max_general = &priv->max_delta.general;
1714 accum_dbg = &priv->accum_statistics.general.dbg;
1715 delta_dbg = &priv->delta_statistics.general.dbg;
1716 max_dbg = &priv->max_delta.general.dbg;
1717 accum_div = &priv->accum_statistics.general.div;
1718 delta_div = &priv->delta_statistics.general.div;
1719 max_div = &priv->max_delta.general.div;
1720 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1721 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1722 "Statistics_General:");
1723 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1724 "temperature:",
1725 le32_to_cpu(general->temperature));
1726 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1727 "temperature_m:",
1728 le32_to_cpu(general->temperature_m));
1729 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1730 "burst_check:",
1731 le32_to_cpu(dbg->burst_check),
1732 accum_dbg->burst_check,
1733 delta_dbg->burst_check, max_dbg->burst_check);
1734 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1735 "burst_count:",
1736 le32_to_cpu(dbg->burst_count),
1737 accum_dbg->burst_count,
1738 delta_dbg->burst_count, max_dbg->burst_count);
1739 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1740 "sleep_time:",
1741 le32_to_cpu(general->sleep_time),
1742 accum_general->sleep_time,
1743 delta_general->sleep_time, max_general->sleep_time);
1744 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1745 "slots_out:",
1746 le32_to_cpu(general->slots_out),
1747 accum_general->slots_out,
1748 delta_general->slots_out, max_general->slots_out);
1749 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1750 "slots_idle:",
1751 le32_to_cpu(general->slots_idle),
1752 accum_general->slots_idle,
1753 delta_general->slots_idle, max_general->slots_idle);
1754 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1755 le32_to_cpu(general->ttl_timestamp));
1756 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1757 "tx_on_a:",
1758 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
1759 delta_div->tx_on_a, max_div->tx_on_a);
1760 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1761 "tx_on_b:",
1762 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
1763 delta_div->tx_on_b, max_div->tx_on_b);
1764 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1765 "exec_time:",
1766 le32_to_cpu(div->exec_time), accum_div->exec_time,
1767 delta_div->exec_time, max_div->exec_time);
1768 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1769 "probe_time:",
1770 le32_to_cpu(div->probe_time), accum_div->probe_time,
1771 delta_div->probe_time, max_div->probe_time);
1772 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1773 "rx_enable_counter:",
1774 le32_to_cpu(general->rx_enable_counter),
1775 accum_general->rx_enable_counter,
1776 delta_general->rx_enable_counter,
1777 max_general->rx_enable_counter);
1778 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1779 "num_of_sos_states:",
1780 le32_to_cpu(general->num_of_sos_states),
1781 accum_general->num_of_sos_states,
1782 delta_general->num_of_sos_states,
1783 max_general->num_of_sos_states);
1784 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1785 kfree(buf);
1786 return ret;
1787} 1082}
1788 1083
1789static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, 1084static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -2341,10 +1636,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2341 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 1636 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2342 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 1637 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2343 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 1638 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1639 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1640 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1641 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1642
2344 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1643 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
2345 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2346 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2347 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
2348 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); 1644 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
2349 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1645 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
2350 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1646 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 9466e909f553..58c69a5798d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -58,7 +58,7 @@ extern struct iwl_cfg iwl5100_abg_cfg;
58extern struct iwl_cfg iwl5150_agn_cfg; 58extern struct iwl_cfg iwl5150_agn_cfg;
59extern struct iwl_cfg iwl5150_abg_cfg; 59extern struct iwl_cfg iwl5150_abg_cfg;
60extern struct iwl_cfg iwl6000i_2agn_cfg; 60extern struct iwl_cfg iwl6000i_2agn_cfg;
61extern struct iwl_cfg iwl6000i_g2_2agn_cfg; 61extern struct iwl_cfg iwl6000g2_2agn_cfg;
62extern struct iwl_cfg iwl6000i_2abg_cfg; 62extern struct iwl_cfg iwl6000i_2abg_cfg;
63extern struct iwl_cfg iwl6000i_2bg_cfg; 63extern struct iwl_cfg iwl6000i_2bg_cfg;
64extern struct iwl_cfg iwl6000_3agn_cfg; 64extern struct iwl_cfg iwl6000_3agn_cfg;
@@ -1049,12 +1049,10 @@ struct iwl_priv {
1049 struct iwl_calib_result calib_results[IWL_CALIB_MAX]; 1049 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1050 1050
1051 /* Scan related variables */ 1051 /* Scan related variables */
1052 unsigned long next_scan_jiffies;
1053 unsigned long scan_start; 1052 unsigned long scan_start;
1054 unsigned long scan_pass_start;
1055 unsigned long scan_start_tsf; 1053 unsigned long scan_start_tsf;
1056 void *scan; 1054 void *scan_cmd;
1057 int scan_bands; 1055 enum ieee80211_band scan_band;
1058 struct cfg80211_scan_request *scan_request; 1056 struct cfg80211_scan_request *scan_request;
1059 bool is_internal_short_scan; 1057 bool is_internal_short_scan;
1060 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1058 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
@@ -1204,6 +1202,11 @@ struct iwl_priv {
1204 struct delayed_work rfkill_poll; 1202 struct delayed_work rfkill_poll;
1205 1203
1206 struct iwl3945_notif_statistics statistics; 1204 struct iwl3945_notif_statistics statistics;
1205#ifdef CONFIG_IWLWIFI_DEBUG
1206 struct iwl3945_notif_statistics accum_statistics;
1207 struct iwl3945_notif_statistics delta_statistics;
1208 struct iwl3945_notif_statistics max_delta;
1209#endif
1207 1210
1208 u32 sta_supp_rates; 1211 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */ 1212 int last_rx_rssi; /* From Rx packet statistics */
@@ -1259,11 +1262,11 @@ struct iwl_priv {
1259 struct work_struct scan_completed; 1262 struct work_struct scan_completed;
1260 struct work_struct rx_replenish; 1263 struct work_struct rx_replenish;
1261 struct work_struct abort_scan; 1264 struct work_struct abort_scan;
1262 struct work_struct request_scan;
1263 struct work_struct beacon_update; 1265 struct work_struct beacon_update;
1264 struct work_struct tt_work; 1266 struct work_struct tt_work;
1265 struct work_struct ct_enter; 1267 struct work_struct ct_enter;
1266 struct work_struct ct_exit; 1268 struct work_struct ct_exit;
1269 struct work_struct start_internal_scan;
1267 1270
1268 struct tasklet_struct irq_tasklet; 1271 struct tasklet_struct irq_tasklet;
1269 1272
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index cb6d50b78140..95aa202c85e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -172,22 +172,22 @@ struct iwl_eeprom_enhanced_txpwr {
172#define EEPROM_5000_TX_POWER_VERSION (4) 172#define EEPROM_5000_TX_POWER_VERSION (4)
173#define EEPROM_5000_EEPROM_VERSION (0x11A) 173#define EEPROM_5000_EEPROM_VERSION (0x11A)
174 174
175/*5000 calibrations */ 175/* 5000 and up calibration */
176#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) 176#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
177#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL) 177#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
178#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL) 178
179 179/* 5000 temperature */
180/* 5000 links */ 180#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
181#define EEPROM_5000_LINK_HOST (2*0x64) 181
182#define EEPROM_5000_LINK_GENERAL (2*0x65) 182/* agn links */
183#define EEPROM_5000_LINK_REGULATORY (2*0x66) 183#define EEPROM_LINK_HOST (2*0x64)
184#define EEPROM_5000_LINK_CALIBRATION (2*0x67) 184#define EEPROM_LINK_GENERAL (2*0x65)
185#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68) 185#define EEPROM_LINK_REGULATORY (2*0x66)
186#define EEPROM_5000_LINK_OTHERS (2*0x69) 186#define EEPROM_LINK_CALIBRATION (2*0x67)
187 187#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
188/* 5000 regulatory - indirect access */ 188#define EEPROM_LINK_OTHERS (2*0x69)
189#define EEPROM_5000_REG_SKU_ID ((0x02)\ 189
190 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ 190/* agn regulatory - indirect access */
191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\ 191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ 192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\ 193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr {
203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\ 203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205 205
206/* 6000 regulatory - indirect access */
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209
206/* 6000 and up regulatory tx power - indirect access */ 210/* 6000 and up regulatory tx power - indirect access */
207/* max. elements per section */ 211/* max. elements per section */
208#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) 212#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
@@ -272,6 +276,10 @@ struct iwl_eeprom_enhanced_txpwr {
272#define EEPROM_6050_TX_POWER_VERSION (4) 276#define EEPROM_6050_TX_POWER_VERSION (4)
273#define EEPROM_6050_EEPROM_VERSION (0x532) 277#define EEPROM_6050_EEPROM_VERSION (0x532)
274 278
279/* 6x00g2 Specific */
280#define EEPROM_6000G2_TX_POWER_VERSION (6)
281#define EEPROM_6000G2_EEPROM_VERSION (0x709)
282
275/* OTP */ 283/* OTP */
276/* lower blocks contain EEPROM image and calibration data */ 284/* lower blocks contain EEPROM image and calibration data */
277#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ 285#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5944de7a98a2..b1f101caf19d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -529,48 +529,48 @@
529#define IWL_SCD_TXFIFO_POS_RA (4) 529#define IWL_SCD_TXFIFO_POS_RA (4)
530#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 530#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
531 531
532/* 5000 SCD */ 532/* agn SCD */
533#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0) 533#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
534#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3) 534#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
535#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4) 535#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
536#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) 536#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
537#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000) 537#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
538 538
539#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8) 539#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
540#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) 540#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
541#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) 541#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
542#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) 542#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
543#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) 543#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
544#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) 544#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
545#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 545#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
546#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 546#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
547 547
548#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600) 548#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
549#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1) 549#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
550#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0) 550#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
551 551
552#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\ 552#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
553 (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 553 (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
554 554
555#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
556 ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) 556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
557 557
558#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\ 558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
559 (~(1<<IWL_CMD_QUEUE_NUM))) 559 (~(1<<IWL_CMD_QUEUE_NUM)))
560 560
561#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00) 561#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
562 562
563#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0) 563#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
564#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8) 564#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
565#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c) 565#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
566#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10) 566#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
567#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14) 567#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
568#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4) 568#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
569#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4) 569#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
570#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8) 570#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
571#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248) 571#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
572#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108) 572#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
573#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4) 573#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
574 574
575/*********************** END TX SCHEDULER *************************************/ 575/*********************** END TX SCHEDULER *************************************/
576 576
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index ae981932ce61..d12fd5553846 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -69,9 +69,8 @@ int iwl_scan_cancel(struct iwl_priv *priv)
69 } 69 }
70 70
71 if (test_bit(STATUS_SCANNING, &priv->status)) { 71 if (test_bit(STATUS_SCANNING, &priv->status)) {
72 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 72 if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n"); 73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
74 set_bit(STATUS_SCAN_ABORTING, &priv->status);
75 queue_work(priv->workqueue, &priv->abort_scan); 74 queue_work(priv->workqueue, &priv->abort_scan);
76 75
77 } else 76 } else
@@ -201,9 +200,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
201 le32_to_cpu(notif->statistics[0]), 200 le32_to_cpu(notif->statistics[0]),
202 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); 201 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
203#endif 202#endif
204
205 if (!priv->is_internal_short_scan)
206 priv->next_scan_jiffies = 0;
207} 203}
208 204
209/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 205/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -223,49 +219,24 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
223 /* The HW is no longer scanning */ 219 /* The HW is no longer scanning */
224 clear_bit(STATUS_SCAN_HW, &priv->status); 220 clear_bit(STATUS_SCAN_HW, &priv->status);
225 221
226 IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n", 222 IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n",
227 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 223 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
228 "2.4" : "5.2",
229 jiffies_to_msecs(elapsed_jiffies 224 jiffies_to_msecs(elapsed_jiffies
230 (priv->scan_pass_start, jiffies))); 225 (priv->scan_start, jiffies)));
231 226
232 /* Remove this scanned band from the list of pending 227 /*
233 * bands to scan, band G precedes A in order of scanning 228 * If a request to abort was given, or the scan did not succeed
234 * as seen in iwl_bg_request_scan */
235 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
236 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
237 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
238 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
239
240 /* If a request to abort was given, or the scan did not succeed
241 * then we reset the scan state machine and terminate, 229 * then we reset the scan state machine and terminate,
242 * re-queuing another scan if one has been requested */ 230 * re-queuing another scan if one has been requested
243 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 231 */
232 if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
244 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n"); 233 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
245 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
246 } else {
247 /* If there are more bands on this scan pass reschedule */
248 if (priv->scan_bands)
249 goto reschedule;
250 }
251
252 if (!priv->is_internal_short_scan)
253 priv->next_scan_jiffies = 0;
254 234
255 IWL_DEBUG_INFO(priv, "Setting scan to off\n"); 235 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
256 236
257 clear_bit(STATUS_SCANNING, &priv->status); 237 clear_bit(STATUS_SCANNING, &priv->status);
258 238
259 IWL_DEBUG_INFO(priv, "Scan took %dms\n",
260 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
261
262 queue_work(priv->workqueue, &priv->scan_completed); 239 queue_work(priv->workqueue, &priv->scan_completed);
263
264 return;
265
266reschedule:
267 priv->scan_pass_start = jiffies;
268 queue_work(priv->workqueue, &priv->request_scan);
269} 240}
270 241
271void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) 242void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -314,150 +285,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
314} 285}
315EXPORT_SYMBOL(iwl_get_passive_dwell_time); 286EXPORT_SYMBOL(iwl_get_passive_dwell_time);
316 287
317static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
318 enum ieee80211_band band,
319 struct iwl_scan_channel *scan_ch)
320{
321 const struct ieee80211_supported_band *sband;
322 const struct iwl_channel_info *ch_info;
323 u16 passive_dwell = 0;
324 u16 active_dwell = 0;
325 int i, added = 0;
326 u16 channel = 0;
327
328 sband = iwl_get_hw_mode(priv, band);
329 if (!sband) {
330 IWL_ERR(priv, "invalid band\n");
331 return added;
332 }
333
334 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
335 passive_dwell = iwl_get_passive_dwell_time(priv, band);
336
337 if (passive_dwell <= active_dwell)
338 passive_dwell = active_dwell + 1;
339
340 /* only scan single channel, good enough to reset the RF */
341 /* pick the first valid not in-use channel */
342 if (band == IEEE80211_BAND_5GHZ) {
343 for (i = 14; i < priv->channel_count; i++) {
344 if (priv->channel_info[i].channel !=
345 le16_to_cpu(priv->staging_rxon.channel)) {
346 channel = priv->channel_info[i].channel;
347 ch_info = iwl_get_channel_info(priv,
348 band, channel);
349 if (is_channel_valid(ch_info))
350 break;
351 }
352 }
353 } else {
354 for (i = 0; i < 14; i++) {
355 if (priv->channel_info[i].channel !=
356 le16_to_cpu(priv->staging_rxon.channel)) {
357 channel =
358 priv->channel_info[i].channel;
359 ch_info = iwl_get_channel_info(priv,
360 band, channel);
361 if (is_channel_valid(ch_info))
362 break;
363 }
364 }
365 }
366 if (channel) {
367 scan_ch->channel = cpu_to_le16(channel);
368 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
369 scan_ch->active_dwell = cpu_to_le16(active_dwell);
370 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
371 /* Set txpower levels to defaults */
372 scan_ch->dsp_atten = 110;
373 if (band == IEEE80211_BAND_5GHZ)
374 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
375 else
376 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
377 added++;
378 } else
379 IWL_ERR(priv, "no valid channel found\n");
380 return added;
381}
382
383static int iwl_get_channels_for_scan(struct iwl_priv *priv,
384 enum ieee80211_band band,
385 u8 is_active, u8 n_probes,
386 struct iwl_scan_channel *scan_ch)
387{
388 struct ieee80211_channel *chan;
389 const struct ieee80211_supported_band *sband;
390 const struct iwl_channel_info *ch_info;
391 u16 passive_dwell = 0;
392 u16 active_dwell = 0;
393 int added, i;
394 u16 channel;
395
396 sband = iwl_get_hw_mode(priv, band);
397 if (!sband)
398 return 0;
399
400 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
401 passive_dwell = iwl_get_passive_dwell_time(priv, band);
402
403 if (passive_dwell <= active_dwell)
404 passive_dwell = active_dwell + 1;
405
406 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
407 chan = priv->scan_request->channels[i];
408
409 if (chan->band != band)
410 continue;
411
412 channel = ieee80211_frequency_to_channel(chan->center_freq);
413 scan_ch->channel = cpu_to_le16(channel);
414
415 ch_info = iwl_get_channel_info(priv, band, channel);
416 if (!is_channel_valid(ch_info)) {
417 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
418 channel);
419 continue;
420 }
421
422 if (!is_active || is_channel_passive(ch_info) ||
423 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
424 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
425 else
426 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
427
428 if (n_probes)
429 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
430
431 scan_ch->active_dwell = cpu_to_le16(active_dwell);
432 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
433
434 /* Set txpower levels to defaults */
435 scan_ch->dsp_atten = 110;
436
437 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
438 * power level:
439 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
440 */
441 if (band == IEEE80211_BAND_5GHZ)
442 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
443 else
444 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
445
446 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
447 channel, le32_to_cpu(scan_ch->type),
448 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
449 "ACTIVE" : "PASSIVE",
450 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
451 active_dwell : passive_dwell);
452
453 scan_ch++;
454 added++;
455 }
456
457 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
458 return added;
459}
460
461void iwl_init_scan_params(struct iwl_priv *priv) 288void iwl_init_scan_params(struct iwl_priv *priv)
462{ 289{
463 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; 290 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
@@ -470,30 +297,34 @@ EXPORT_SYMBOL(iwl_init_scan_params);
470 297
471static int iwl_scan_initiate(struct iwl_priv *priv) 298static int iwl_scan_initiate(struct iwl_priv *priv)
472{ 299{
300 WARN_ON(!mutex_is_locked(&priv->mutex));
301
473 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 302 IWL_DEBUG_INFO(priv, "Starting scan...\n");
474 set_bit(STATUS_SCANNING, &priv->status); 303 set_bit(STATUS_SCANNING, &priv->status);
475 priv->is_internal_short_scan = false; 304 priv->is_internal_short_scan = false;
476 priv->scan_start = jiffies; 305 priv->scan_start = jiffies;
477 priv->scan_pass_start = priv->scan_start;
478 306
479 queue_work(priv->workqueue, &priv->request_scan); 307 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
308 return -EOPNOTSUPP;
309
310 priv->cfg->ops->utils->request_scan(priv);
480 311
481 return 0; 312 return 0;
482} 313}
483 314
484#define IWL_DELAY_NEXT_SCAN (HZ*2)
485
486int iwl_mac_hw_scan(struct ieee80211_hw *hw, 315int iwl_mac_hw_scan(struct ieee80211_hw *hw,
487 struct cfg80211_scan_request *req) 316 struct ieee80211_vif *vif,
317 struct cfg80211_scan_request *req)
488{ 318{
489 unsigned long flags;
490 struct iwl_priv *priv = hw->priv; 319 struct iwl_priv *priv = hw->priv;
491 int ret, i; 320 int ret;
492 321
493 IWL_DEBUG_MAC80211(priv, "enter\n"); 322 IWL_DEBUG_MAC80211(priv, "enter\n");
494 323
324 if (req->n_channels == 0)
325 return -EINVAL;
326
495 mutex_lock(&priv->mutex); 327 mutex_lock(&priv->mutex);
496 spin_lock_irqsave(&priv->lock, flags);
497 328
498 if (!iwl_is_ready_rf(priv)) { 329 if (!iwl_is_ready_rf(priv)) {
499 ret = -EIO; 330 ret = -EIO;
@@ -513,22 +344,8 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
513 goto out_unlock; 344 goto out_unlock;
514 } 345 }
515 346
516 /* We don't schedule scan within next_scan_jiffies period. 347 /* mac80211 will only ask for one band at a time */
517 * Avoid scanning during possible EAPOL exchange, return 348 priv->scan_band = req->channels[0]->band;
518 * success immediately.
519 */
520 if (priv->next_scan_jiffies &&
521 time_after(priv->next_scan_jiffies, jiffies)) {
522 IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
523 queue_work(priv->workqueue, &priv->scan_completed);
524 ret = 0;
525 goto out_unlock;
526 }
527
528 priv->scan_bands = 0;
529 for (i = 0; i < req->n_channels; i++)
530 priv->scan_bands |= BIT(req->channels[i]->band);
531
532 priv->scan_request = req; 349 priv->scan_request = req;
533 350
534 ret = iwl_scan_initiate(priv); 351 ret = iwl_scan_initiate(priv);
@@ -536,7 +353,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
536 IWL_DEBUG_MAC80211(priv, "leave\n"); 353 IWL_DEBUG_MAC80211(priv, "leave\n");
537 354
538out_unlock: 355out_unlock:
539 spin_unlock_irqrestore(&priv->lock, flags);
540 mutex_unlock(&priv->mutex); 356 mutex_unlock(&priv->mutex);
541 357
542 return ret; 358 return ret;
@@ -547,42 +363,46 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
547 * internal short scan, this function should only been called while associated. 363 * internal short scan, this function should only been called while associated.
548 * It will reset and tune the radio to prevent possible RF related problem 364 * It will reset and tune the radio to prevent possible RF related problem
549 */ 365 */
550int iwl_internal_short_hw_scan(struct iwl_priv *priv) 366void iwl_internal_short_hw_scan(struct iwl_priv *priv)
551{ 367{
552 int ret = 0; 368 queue_work(priv->workqueue, &priv->start_internal_scan);
369}
370
371static void iwl_bg_start_internal_scan(struct work_struct *work)
372{
373 struct iwl_priv *priv =
374 container_of(work, struct iwl_priv, start_internal_scan);
375
376 mutex_lock(&priv->mutex);
553 377
554 if (!iwl_is_ready_rf(priv)) { 378 if (!iwl_is_ready_rf(priv)) {
555 ret = -EIO;
556 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 379 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
557 goto out; 380 goto unlock;
558 } 381 }
382
559 if (test_bit(STATUS_SCANNING, &priv->status)) { 383 if (test_bit(STATUS_SCANNING, &priv->status)) {
560 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 384 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
561 ret = -EAGAIN; 385 goto unlock;
562 goto out;
563 } 386 }
387
564 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 388 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
565 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); 389 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
566 ret = -EAGAIN; 390 goto unlock;
567 goto out;
568 } 391 }
569 392
570 priv->scan_bands = 0; 393 priv->scan_band = priv->band;
571 if (priv->band == IEEE80211_BAND_5GHZ)
572 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
573 else
574 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
575 394
576 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n"); 395 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
577 set_bit(STATUS_SCANNING, &priv->status); 396 set_bit(STATUS_SCANNING, &priv->status);
578 priv->is_internal_short_scan = true; 397 priv->is_internal_short_scan = true;
579 queue_work(priv->workqueue, &priv->request_scan);
580 398
581out: 399 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
582 return ret; 400 goto unlock;
583}
584 401
585#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 402 priv->cfg->ops->utils->request_scan(priv);
403 unlock:
404 mutex_unlock(&priv->mutex);
405}
586 406
587void iwl_bg_scan_check(struct work_struct *data) 407void iwl_bg_scan_check(struct work_struct *data)
588{ 408{
@@ -645,275 +465,15 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
645 if (WARN_ON(left < ie_len)) 465 if (WARN_ON(left < ie_len))
646 return len; 466 return len;
647 467
648 if (ies) 468 if (ies && ie_len) {
649 memcpy(pos, ies, ie_len); 469 memcpy(pos, ies, ie_len);
650 len += ie_len; 470 len += ie_len;
651 left -= ie_len; 471 }
652 472
653 return (u16)len; 473 return (u16)len;
654} 474}
655EXPORT_SYMBOL(iwl_fill_probe_req); 475EXPORT_SYMBOL(iwl_fill_probe_req);
656 476
657static void iwl_bg_request_scan(struct work_struct *data)
658{
659 struct iwl_priv *priv =
660 container_of(data, struct iwl_priv, request_scan);
661 struct iwl_host_cmd cmd = {
662 .id = REPLY_SCAN_CMD,
663 .len = sizeof(struct iwl_scan_cmd),
664 .flags = CMD_SIZE_HUGE,
665 };
666 struct iwl_scan_cmd *scan;
667 struct ieee80211_conf *conf = NULL;
668 u32 rate_flags = 0;
669 u16 cmd_len;
670 u16 rx_chain = 0;
671 enum ieee80211_band band;
672 u8 n_probes = 0;
673 u8 rx_ant = priv->hw_params.valid_rx_ant;
674 u8 rate;
675 bool is_active = false;
676 int chan_mod;
677 u8 active_chains;
678
679 conf = ieee80211_get_hw_conf(priv->hw);
680
681 mutex_lock(&priv->mutex);
682
683 cancel_delayed_work(&priv->scan_check);
684
685 if (!iwl_is_ready(priv)) {
686 IWL_WARN(priv, "request scan called when driver not ready.\n");
687 goto done;
688 }
689
690 /* Make sure the scan wasn't canceled before this queued work
691 * was given the chance to run... */
692 if (!test_bit(STATUS_SCANNING, &priv->status))
693 goto done;
694
695 /* This should never be called or scheduled if there is currently
696 * a scan active in the hardware. */
697 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
698 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
699 "Ignoring second request.\n");
700 goto done;
701 }
702
703 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
704 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
705 goto done;
706 }
707
708 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
709 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
710 goto done;
711 }
712
713 if (iwl_is_rfkill(priv)) {
714 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
715 goto done;
716 }
717
718 if (!test_bit(STATUS_READY, &priv->status)) {
719 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
720 goto done;
721 }
722
723 if (!priv->scan_bands) {
724 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
725 goto done;
726 }
727
728 if (!priv->scan) {
729 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
730 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
731 if (!priv->scan) {
732 IWL_DEBUG_SCAN(priv,
733 "fail to allocate memory for scan\n");
734 goto done;
735 }
736 }
737 scan = priv->scan;
738 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
739
740 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
741 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
742
743 if (iwl_is_associated(priv)) {
744 u16 interval = 0;
745 u32 extra;
746 u32 suspend_time = 100;
747 u32 scan_suspend_time = 100;
748 unsigned long flags;
749
750 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
751 spin_lock_irqsave(&priv->lock, flags);
752 interval = priv->beacon_int;
753 spin_unlock_irqrestore(&priv->lock, flags);
754
755 scan->suspend_time = 0;
756 scan->max_out_time = cpu_to_le32(200 * 1024);
757 if (!interval)
758 interval = suspend_time;
759
760 extra = (suspend_time / interval) << 22;
761 scan_suspend_time = (extra |
762 ((suspend_time % interval) * 1024));
763 scan->suspend_time = cpu_to_le32(scan_suspend_time);
764 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
765 scan_suspend_time, interval);
766 }
767
768 if (priv->is_internal_short_scan) {
769 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
770 } else if (priv->scan_request->n_ssids) {
771 int i, p = 0;
772 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
773 for (i = 0; i < priv->scan_request->n_ssids; i++) {
774 /* always does wildcard anyway */
775 if (!priv->scan_request->ssids[i].ssid_len)
776 continue;
777 scan->direct_scan[p].id = WLAN_EID_SSID;
778 scan->direct_scan[p].len =
779 priv->scan_request->ssids[i].ssid_len;
780 memcpy(scan->direct_scan[p].ssid,
781 priv->scan_request->ssids[i].ssid,
782 priv->scan_request->ssids[i].ssid_len);
783 n_probes++;
784 p++;
785 }
786 is_active = true;
787 } else
788 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
789
790 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
791 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
792 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
793
794
795 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
796 band = IEEE80211_BAND_2GHZ;
797 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
798 chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
799 >> RXON_FLG_CHANNEL_MODE_POS;
800 if (chan_mod == CHANNEL_MODE_PURE_40) {
801 rate = IWL_RATE_6M_PLCP;
802 } else {
803 rate = IWL_RATE_1M_PLCP;
804 rate_flags = RATE_MCS_CCK_MSK;
805 }
806 scan->good_CRC_th = 0;
807 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
808 band = IEEE80211_BAND_5GHZ;
809 rate = IWL_RATE_6M_PLCP;
810 /*
811 * If active scaning is requested but a certain channel
812 * is marked passive, we can do active scanning if we
813 * detect transmissions.
814 */
815 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
816
817 /* Force use of chains B and C (0x6) for scan Rx
818 * Avoid A (0x1) for the device has off-channel reception
819 * on A-band.
820 */
821 if (priv->cfg->off_channel_workaround)
822 rx_ant = ANT_BC;
823 } else {
824 IWL_WARN(priv, "Invalid scan band count\n");
825 goto done;
826 }
827
828 priv->scan_tx_ant[band] =
829 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
830 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
831 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
832
833 /* In power save mode use one chain, otherwise use all chains */
834 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
835 /* rx_ant has been set to all valid chains previously */
836 active_chains = rx_ant &
837 ((u8)(priv->chain_noise_data.active_chains));
838 if (!active_chains)
839 active_chains = rx_ant;
840
841 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
842 priv->chain_noise_data.active_chains);
843
844 rx_ant = first_antenna(active_chains);
845 }
846 /* MIMO is not used here, but value is required */
847 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
848 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
849 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
850 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
851 scan->rx_chain = cpu_to_le16(rx_chain);
852 if (!priv->is_internal_short_scan) {
853 cmd_len = iwl_fill_probe_req(priv,
854 (struct ieee80211_mgmt *)scan->data,
855 priv->scan_request->ie,
856 priv->scan_request->ie_len,
857 IWL_MAX_SCAN_SIZE - sizeof(*scan));
858 } else {
859 cmd_len = iwl_fill_probe_req(priv,
860 (struct ieee80211_mgmt *)scan->data,
861 NULL, 0,
862 IWL_MAX_SCAN_SIZE - sizeof(*scan));
863
864 }
865 scan->tx_cmd.len = cpu_to_le16(cmd_len);
866 if (iwl_is_monitor_mode(priv))
867 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
868
869 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
870 RXON_FILTER_BCON_AWARE_MSK);
871
872 if (priv->is_internal_short_scan) {
873 scan->channel_count =
874 iwl_get_single_channel_for_scan(priv, band,
875 (void *)&scan->data[le16_to_cpu(
876 scan->tx_cmd.len)]);
877 } else {
878 scan->channel_count =
879 iwl_get_channels_for_scan(priv, band,
880 is_active, n_probes,
881 (void *)&scan->data[le16_to_cpu(
882 scan->tx_cmd.len)]);
883 }
884 if (scan->channel_count == 0) {
885 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
886 goto done;
887 }
888
889 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
890 scan->channel_count * sizeof(struct iwl_scan_channel);
891 cmd.data = scan;
892 scan->len = cpu_to_le16(cmd.len);
893
894 set_bit(STATUS_SCAN_HW, &priv->status);
895 if (iwl_send_cmd_sync(priv, &cmd))
896 goto done;
897
898 queue_delayed_work(priv->workqueue, &priv->scan_check,
899 IWL_SCAN_CHECK_WATCHDOG);
900
901 mutex_unlock(&priv->mutex);
902 return;
903
904 done:
905 /* Cannot perform scan. Make sure we clear scanning
906 * bits from status so next scan request can be performed.
907 * If we don't clear scanning status bit here all next scan
908 * will fail
909 */
910 clear_bit(STATUS_SCAN_HW, &priv->status);
911 clear_bit(STATUS_SCANNING, &priv->status);
912 /* inform mac80211 scan aborted */
913 queue_work(priv->workqueue, &priv->scan_completed);
914 mutex_unlock(&priv->mutex);
915}
916
917void iwl_bg_abort_scan(struct work_struct *work) 477void iwl_bg_abort_scan(struct work_struct *work)
918{ 478{
919 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); 479 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
@@ -961,8 +521,8 @@ EXPORT_SYMBOL(iwl_bg_scan_completed);
961void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 521void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
962{ 522{
963 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 523 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
964 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
965 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 524 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
525 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
966 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 526 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
967} 527}
968EXPORT_SYMBOL(iwl_setup_scan_deferred_work); 528EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index d86ecd2f9ec2..db934476b5e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -451,7 +451,17 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
451 451
452 link_cmd.general_params.single_stream_ant_msk = 452 link_cmd.general_params.single_stream_ant_msk =
453 first_antenna(priv->hw_params.valid_tx_ant); 453 first_antenna(priv->hw_params.valid_tx_ant);
454 link_cmd.general_params.dual_stream_ant_msk = 3; 454
455 link_cmd.general_params.dual_stream_ant_msk =
456 priv->hw_params.valid_tx_ant &
457 ~first_antenna(priv->hw_params.valid_tx_ant);
458 if (!link_cmd.general_params.dual_stream_ant_msk) {
459 link_cmd.general_params.dual_stream_ant_msk = ANT_AB;
460 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
461 link_cmd.general_params.dual_stream_ant_msk =
462 priv->hw_params.valid_tx_ant;
463 }
464
455 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 465 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
456 link_cmd.agg_params.agg_time_limit = 466 link_cmd.agg_params.agg_time_limit =
457 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 467 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
@@ -1196,7 +1206,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
1196 iwl_dump_lq_cmd(priv, lq); 1206 iwl_dump_lq_cmd(priv, lq);
1197 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 1207 BUG_ON(init && (cmd.flags & CMD_ASYNC));
1198 1208
1199 iwl_dump_lq_cmd(priv, lq);
1200 ret = iwl_send_cmd(priv, &cmd); 1209 ret = iwl_send_cmd(priv, &cmd);
1201 if (ret || (cmd.flags & CMD_ASYNC)) 1210 if (ret || (cmd.flags & CMD_ASYNC))
1202 return ret; 1211 return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 9f362024a29c..c7e1d7d09e02 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -957,7 +957,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
957 * statistics request from the host as well as for the periodic 957 * statistics request from the host as well as for the periodic
958 * statistics notifications (after received beacons) from the uCode. 958 * statistics notifications (after received beacons) from the uCode.
959 */ 959 */
960 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics; 960 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
961 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 961 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
962 962
963 iwl_setup_rx_scan_handlers(priv); 963 iwl_setup_rx_scan_handlers(priv);
@@ -2527,7 +2527,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2527 } 2527 }
2528 2528
2529 /* Configure Bluetooth device coexistence support */ 2529 /* Configure Bluetooth device coexistence support */
2530 iwl_send_bt_config(priv); 2530 priv->cfg->ops->hcmd->send_bt_config(priv);
2531 2531
2532 /* Configure the adapter for unassociated operation */ 2532 /* Configure the adapter for unassociated operation */
2533 iwlcore_commit_rxon(priv); 2533 iwlcore_commit_rxon(priv);
@@ -2791,11 +2791,8 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2791 2791
2792} 2792}
2793 2793
2794#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 2794void iwl3945_request_scan(struct iwl_priv *priv)
2795static void iwl3945_bg_request_scan(struct work_struct *data)
2796{ 2795{
2797 struct iwl_priv *priv =
2798 container_of(data, struct iwl_priv, request_scan);
2799 struct iwl_host_cmd cmd = { 2796 struct iwl_host_cmd cmd = {
2800 .id = REPLY_SCAN_CMD, 2797 .id = REPLY_SCAN_CMD,
2801 .len = sizeof(struct iwl3945_scan_cmd), 2798 .len = sizeof(struct iwl3945_scan_cmd),
@@ -2809,8 +2806,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2809 2806
2810 conf = ieee80211_get_hw_conf(priv->hw); 2807 conf = ieee80211_get_hw_conf(priv->hw);
2811 2808
2812 mutex_lock(&priv->mutex);
2813
2814 cancel_delayed_work(&priv->scan_check); 2809 cancel_delayed_work(&priv->scan_check);
2815 2810
2816 if (!iwl_is_ready(priv)) { 2811 if (!iwl_is_ready(priv)) {
@@ -2853,20 +2848,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2853 goto done; 2848 goto done;
2854 } 2849 }
2855 2850
2856 if (!priv->scan_bands) { 2851 if (!priv->scan_cmd) {
2857 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n"); 2852 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2858 goto done; 2853 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2859 } 2854 if (!priv->scan_cmd) {
2860
2861 if (!priv->scan) {
2862 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2863 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2864 if (!priv->scan) {
2865 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); 2855 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2866 goto done; 2856 goto done;
2867 } 2857 }
2868 } 2858 }
2869 scan = priv->scan; 2859 scan = priv->scan_cmd;
2870 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); 2860 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2871 2861
2872 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2862 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
@@ -2935,22 +2925,26 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2935 2925
2936 /* flags + rate selection */ 2926 /* flags + rate selection */
2937 2927
2938 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { 2928 switch (priv->scan_band) {
2929 case IEEE80211_BAND_2GHZ:
2939 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2930 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2940 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 2931 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2941 scan->good_CRC_th = 0; 2932 scan->good_CRC_th = 0;
2942 band = IEEE80211_BAND_2GHZ; 2933 band = IEEE80211_BAND_2GHZ;
2943 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 2934 break;
2935 case IEEE80211_BAND_5GHZ:
2944 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 2936 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2945 /* 2937 /*
2946 * If active scaning is requested but a certain channel 2938 * If active scaning is requested but a certain channel
2947 * is marked passive, we can do active scanning if we 2939 * is marked passive, we can do active scanning if we
2948 * detect transmissions. 2940 * detect transmissions.
2949 */ 2941 */
2950 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 2942 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2943 IWL_GOOD_CRC_TH_DISABLED;
2951 band = IEEE80211_BAND_5GHZ; 2944 band = IEEE80211_BAND_5GHZ;
2952 } else { 2945 break;
2953 IWL_WARN(priv, "Invalid scan band count\n"); 2946 default:
2947 IWL_WARN(priv, "Invalid scan band\n");
2954 goto done; 2948 goto done;
2955 } 2949 }
2956 2950
@@ -2971,9 +2965,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2971 /* select Rx antennas */ 2965 /* select Rx antennas */
2972 scan->flags |= iwl3945_get_antenna_flags(priv); 2966 scan->flags |= iwl3945_get_antenna_flags(priv);
2973 2967
2974 if (iwl_is_monitor_mode(priv))
2975 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
2976
2977 scan->channel_count = 2968 scan->channel_count =
2978 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, 2969 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2979 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 2970 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
@@ -2995,7 +2986,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2995 queue_delayed_work(priv->workqueue, &priv->scan_check, 2986 queue_delayed_work(priv->workqueue, &priv->scan_check,
2996 IWL_SCAN_CHECK_WATCHDOG); 2987 IWL_SCAN_CHECK_WATCHDOG);
2997 2988
2998 mutex_unlock(&priv->mutex);
2999 return; 2989 return;
3000 2990
3001 done: 2991 done:
@@ -3009,7 +2999,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
3009 2999
3010 /* inform mac80211 scan aborted */ 3000 /* inform mac80211 scan aborted */
3011 queue_work(priv->workqueue, &priv->scan_completed); 3001 queue_work(priv->workqueue, &priv->scan_completed);
3012 mutex_unlock(&priv->mutex);
3013} 3002}
3014 3003
3015static void iwl3945_bg_restart(struct work_struct *data) 3004static void iwl3945_bg_restart(struct work_struct *data)
@@ -3051,8 +3040,6 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
3051 mutex_unlock(&priv->mutex); 3040 mutex_unlock(&priv->mutex);
3052} 3041}
3053 3042
3054#define IWL_DELAY_NEXT_SCAN (HZ*2)
3055
3056void iwl3945_post_associate(struct iwl_priv *priv) 3043void iwl3945_post_associate(struct iwl_priv *priv)
3057{ 3044{
3058 int rc = 0; 3045 int rc = 0;
@@ -3137,9 +3124,6 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3137 __func__, priv->iw_mode); 3124 __func__, priv->iw_mode);
3138 break; 3125 break;
3139 } 3126 }
3140
3141 /* we have just associated, don't start scan too early */
3142 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
3143} 3127}
3144 3128
3145/***************************************************************************** 3129/*****************************************************************************
@@ -3672,44 +3656,6 @@ static ssize_t show_channels(struct device *d,
3672 3656
3673static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3657static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
3674 3658
3675static ssize_t show_statistics(struct device *d,
3676 struct device_attribute *attr, char *buf)
3677{
3678 struct iwl_priv *priv = dev_get_drvdata(d);
3679 u32 size = sizeof(struct iwl3945_notif_statistics);
3680 u32 len = 0, ofs = 0;
3681 u8 *data = (u8 *)&priv->_3945.statistics;
3682 int rc = 0;
3683
3684 if (!iwl_is_alive(priv))
3685 return -EAGAIN;
3686
3687 mutex_lock(&priv->mutex);
3688 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3689 mutex_unlock(&priv->mutex);
3690
3691 if (rc) {
3692 len = sprintf(buf,
3693 "Error sending statistics request: 0x%08X\n", rc);
3694 return len;
3695 }
3696
3697 while (size && (PAGE_SIZE - len)) {
3698 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3699 PAGE_SIZE - len, 1);
3700 len = strlen(buf);
3701 if (PAGE_SIZE - len)
3702 buf[len++] = '\n';
3703
3704 ofs += 16;
3705 size -= min(size, 16U);
3706 }
3707
3708 return len;
3709}
3710
3711static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
3712
3713static ssize_t show_antenna(struct device *d, 3659static ssize_t show_antenna(struct device *d,
3714 struct device_attribute *attr, char *buf) 3660 struct device_attribute *attr, char *buf)
3715{ 3661{
@@ -3793,7 +3739,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3793 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3739 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3794 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3740 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3795 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3741 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
3796 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
3797 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3742 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
3798 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 3743 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
3799 3744
@@ -3830,7 +3775,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3830 &dev_attr_filter_flags.attr, 3775 &dev_attr_filter_flags.attr,
3831 &dev_attr_measurement.attr, 3776 &dev_attr_measurement.attr,
3832 &dev_attr_retry_rate.attr, 3777 &dev_attr_retry_rate.attr,
3833 &dev_attr_statistics.attr,
3834 &dev_attr_status.attr, 3778 &dev_attr_status.attr,
3835 &dev_attr_temperature.attr, 3779 &dev_attr_temperature.attr,
3836 &dev_attr_tx_power.attr, 3780 &dev_attr_tx_power.attr,
@@ -3930,7 +3874,6 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3930 3874
3931 /* Tell mac80211 our characteristics */ 3875 /* Tell mac80211 our characteristics */
3932 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3876 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3933 IEEE80211_HW_NOISE_DBM |
3934 IEEE80211_HW_SPECTRUM_MGMT; 3877 IEEE80211_HW_SPECTRUM_MGMT;
3935 3878
3936 if (!priv->cfg->broken_powersave) 3879 if (!priv->cfg->broken_powersave)
@@ -4253,7 +4196,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4253 4196
4254 iwl_free_channel_map(priv); 4197 iwl_free_channel_map(priv);
4255 iwlcore_free_geos(priv); 4198 iwlcore_free_geos(priv);
4256 kfree(priv->scan); 4199 kfree(priv->scan_cmd);
4257 if (priv->ibss_beacon) 4200 if (priv->ibss_beacon)
4258 dev_kfree_skb(priv->ibss_beacon); 4201 dev_kfree_skb(priv->ibss_beacon);
4259 4202
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index aeed5cd80819..cdc7e07ba113 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -6,3 +6,5 @@ iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
6iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o 6iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
7 7
8CFLAGS_trace.o := -I$(src) 8CFLAGS_trace.o := -I$(src)
9
10ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
index 836663eec257..62edd5888a7b 100644
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -31,7 +31,7 @@ struct iwm_if_ops {
31 int (*disable)(struct iwm_priv *iwm); 31 int (*disable)(struct iwm_priv *iwm);
32 int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count); 32 int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
33 33
34 int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir); 34 void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
35 void (*debugfs_exit)(struct iwm_priv *iwm); 35 void (*debugfs_exit)(struct iwm_priv *iwm);
36 36
37 const char *umac_name; 37 const char *umac_name;
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
index e35c9b693d1f..a0c13a49ab3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -113,13 +113,10 @@ struct iwm_debugfs {
113}; 113};
114 114
115#ifdef CONFIG_IWM_DEBUG 115#ifdef CONFIG_IWM_DEBUG
116int iwm_debugfs_init(struct iwm_priv *iwm); 116void iwm_debugfs_init(struct iwm_priv *iwm);
117void iwm_debugfs_exit(struct iwm_priv *iwm); 117void iwm_debugfs_exit(struct iwm_priv *iwm);
118#else 118#else
119static inline int iwm_debugfs_init(struct iwm_priv *iwm) 119static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
120{
121 return 0;
122}
123static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {} 120static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
124#endif 121#endif
125 122
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 724441368a18..53b0b7711f02 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -48,12 +48,11 @@ static struct {
48 48
49#define add_dbg_module(dbg, name, id, initlevel) \ 49#define add_dbg_module(dbg, name, id, initlevel) \
50do { \ 50do { \
51 struct dentry *d; \
52 dbg.dbg_module[id] = (initlevel); \ 51 dbg.dbg_module[id] = (initlevel); \
53 d = debugfs_create_x8(name, 0600, dbg.dbgdir, \ 52 dbg.dbg_module_dentries[id] = \
54 &(dbg.dbg_module[id])); \ 53 debugfs_create_x8(name, 0600, \
55 if (!IS_ERR(d)) \ 54 dbg.dbgdir, \
56 dbg.dbg_module_dentries[id] = d; \ 55 &(dbg.dbg_module[id])); \
57} while (0) 56} while (0)
58 57
59static int iwm_debugfs_u32_read(void *data, u64 *val) 58static int iwm_debugfs_u32_read(void *data, u64 *val)
@@ -423,89 +422,29 @@ static const struct file_operations iwm_debugfs_fw_err_fops = {
423 .read = iwm_debugfs_fw_err_read, 422 .read = iwm_debugfs_fw_err_read,
424}; 423};
425 424
426int iwm_debugfs_init(struct iwm_priv *iwm) 425void iwm_debugfs_init(struct iwm_priv *iwm)
427{ 426{
428 int i, result; 427 int i;
429 char devdir[16];
430 428
431 iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); 429 iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
432 result = PTR_ERR(iwm->dbg.rootdir); 430 iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
433 if (!result || IS_ERR(iwm->dbg.rootdir)) { 431 iwm->dbg.rootdir);
434 if (result == -ENODEV) {
435 IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
436 "enabled in kernel config\n");
437 result = 0; /* No debugfs support */
438 }
439 IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
440 goto error;
441 }
442
443 snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
444
445 iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
446 result = PTR_ERR(iwm->dbg.devdir);
447 if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
448 IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
449 goto error;
450 }
451
452 iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir); 432 iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
453 result = PTR_ERR(iwm->dbg.dbgdir);
454 if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
455 IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
456 goto error;
457 }
458
459 iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir); 433 iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
460 result = PTR_ERR(iwm->dbg.rxdir);
461 if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
462 IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
463 goto error;
464 }
465
466 iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir); 434 iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
467 result = PTR_ERR(iwm->dbg.txdir);
468 if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
469 IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
470 goto error;
471 }
472
473 iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir); 435 iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
474 result = PTR_ERR(iwm->dbg.busdir); 436 if (iwm->bus_ops->debugfs_init)
475 if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) { 437 iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
476 IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
477 goto error;
478 }
479
480 if (iwm->bus_ops->debugfs_init) {
481 result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
482 if (result < 0) {
483 IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
484 goto error;
485 }
486 }
487
488 438
489 iwm->dbg.dbg_level = IWM_DL_NONE; 439 iwm->dbg.dbg_level = IWM_DL_NONE;
490 iwm->dbg.dbg_level_dentry = 440 iwm->dbg.dbg_level_dentry =
491 debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm, 441 debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
492 &fops_iwm_dbg_level); 442 &fops_iwm_dbg_level);
493 result = PTR_ERR(iwm->dbg.dbg_level_dentry);
494 if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
495 IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
496 goto error;
497 }
498
499 443
500 iwm->dbg.dbg_modules = IWM_DM_DEFAULT; 444 iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
501 iwm->dbg.dbg_modules_dentry = 445 iwm->dbg.dbg_modules_dentry =
502 debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm, 446 debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
503 &fops_iwm_dbg_modules); 447 &fops_iwm_dbg_modules);
504 result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
505 if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
506 IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
507 goto error;
508 }
509 448
510 for (i = 0; i < __IWM_DM_NR; i++) 449 for (i = 0; i < __IWM_DM_NR; i++)
511 add_dbg_module(iwm->dbg, iwm_debug_module[i].name, 450 add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
@@ -514,44 +453,15 @@ int iwm_debugfs_init(struct iwm_priv *iwm)
514 iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200, 453 iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
515 iwm->dbg.txdir, iwm, 454 iwm->dbg.txdir, iwm,
516 &iwm_debugfs_txq_fops); 455 &iwm_debugfs_txq_fops);
517 result = PTR_ERR(iwm->dbg.txq_dentry);
518 if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
519 IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
520 goto error;
521 }
522
523 iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200, 456 iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
524 iwm->dbg.txdir, iwm, 457 iwm->dbg.txdir, iwm,
525 &iwm_debugfs_tx_credit_fops); 458 &iwm_debugfs_tx_credit_fops);
526 result = PTR_ERR(iwm->dbg.tx_credit_dentry);
527 if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
528 IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
529 goto error;
530 }
531
532 iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200, 459 iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
533 iwm->dbg.rxdir, iwm, 460 iwm->dbg.rxdir, iwm,
534 &iwm_debugfs_rx_ticket_fops); 461 &iwm_debugfs_rx_ticket_fops);
535 result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
536 if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
537 IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
538 goto error;
539 }
540
541 iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200, 462 iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
542 iwm->dbg.dbgdir, iwm, 463 iwm->dbg.dbgdir, iwm,
543 &iwm_debugfs_fw_err_fops); 464 &iwm_debugfs_fw_err_fops);
544 result = PTR_ERR(iwm->dbg.fw_err_dentry);
545 if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) {
546 IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result);
547 goto error;
548 }
549
550
551 return 0;
552
553 error:
554 return result;
555} 465}
556 466
557void iwm_debugfs_exit(struct iwm_priv *iwm) 467void iwm_debugfs_exit(struct iwm_priv *iwm)
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index ad5398779240..e1184deca559 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -431,7 +431,8 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
431 return PTR_ERR(ticket_node); 431 return PTR_ERR(ticket_node);
432 432
433 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n", 433 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
434 ticket->action == IWM_RX_TICKET_RELEASE ? 434 __le16_to_cpu(ticket->action) ==
435 IWM_RX_TICKET_RELEASE ?
435 "RELEASE" : "DROP", 436 "RELEASE" : "DROP",
436 ticket->id); 437 ticket->id);
437 spin_lock(&iwm->ticket_lock); 438 spin_lock(&iwm->ticket_lock);
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 1eafd6dec3fd..1acea37f39f8 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -366,21 +366,13 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
366 .read = iwm_debugfs_sdio_read, 366 .read = iwm_debugfs_sdio_read,
367}; 367};
368 368
369static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir) 369static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
370{ 370{
371 int result;
372 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); 371 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
373 372
374 hw->cccr_dentry = debugfs_create_file("cccr", 0200, 373 hw->cccr_dentry = debugfs_create_file("cccr", 0200,
375 parent_dir, iwm, 374 parent_dir, iwm,
376 &iwm_debugfs_sdio_fops); 375 &iwm_debugfs_sdio_fops);
377 result = PTR_ERR(hw->cccr_dentry);
378 if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
379 IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
380 return result;
381 }
382
383 return 0;
384} 376}
385 377
386static void if_sdio_debugfs_exit(struct iwm_priv *iwm) 378static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
@@ -440,11 +432,7 @@ static int iwm_sdio_probe(struct sdio_func *func,
440 hw = iwm_private(iwm); 432 hw = iwm_private(iwm);
441 hw->iwm = iwm; 433 hw->iwm = iwm;
442 434
443 ret = iwm_debugfs_init(iwm); 435 iwm_debugfs_init(iwm);
444 if (ret < 0) {
445 IWM_ERR(iwm, "Debugfs registration failed\n");
446 goto if_free;
447 }
448 436
449 sdio_set_drvdata(func, hw); 437 sdio_set_drvdata(func, hw);
450 438
@@ -473,7 +461,6 @@ static int iwm_sdio_probe(struct sdio_func *func,
473 destroy_workqueue(hw->isr_wq); 461 destroy_workqueue(hw->isr_wq);
474 debugfs_exit: 462 debugfs_exit:
475 iwm_debugfs_exit(iwm); 463 iwm_debugfs_exit(iwm);
476 if_free:
477 iwm_if_free(iwm); 464 iwm_if_free(iwm);
478 return ret; 465 return ret;
479} 466}
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
index 320e54fbb38c..abb4805fa8df 100644
--- a/drivers/net/wireless/iwmc3200wifi/trace.h
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -76,7 +76,7 @@ TRACE_EVENT(iwm_tx_wifi_cmd,
76 IWM_ASSIGN; 76 IWM_ASSIGN;
77 __entry->opcode = hdr->sw_hdr.cmd.cmd; 77 __entry->opcode = hdr->sw_hdr.cmd.cmd;
78 __entry->lmac = 0; 78 __entry->lmac = 0;
79 __entry->seq = hdr->sw_hdr.cmd.seq_num; 79 __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
80 __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ); 80 __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
81 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR); 81 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
82 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT); 82 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
@@ -123,7 +123,7 @@ TRACE_EVENT(iwm_tx_packets,
123 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID); 123 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
124 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP); 124 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
125 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR); 125 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
126 __entry->seq = hdr->sw_hdr.cmd.seq_num; 126 __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
127 __entry->npkt = 1; 127 __entry->npkt = 1;
128 __entry->bytes = len; 128 __entry->bytes = len;
129 129
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index 9537cdb13d3f..3216621fc55a 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -302,8 +302,8 @@ void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
302 302
303#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr) 303#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
304 304
305static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb, 305static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
306 int pool_id, u8 *buf) 306 int pool_id, u8 *buf)
307{ 307{
308 struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf; 308 struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
309 struct iwm_udma_wifi_cmd udma_cmd; 309 struct iwm_udma_wifi_cmd udma_cmd;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index cd464a2589b9..64dd345d30f5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -315,12 +315,30 @@ out:
315 return ret; 315 return ret;
316} 316}
317 317
318static int if_sdio_wait_status(struct if_sdio_card *card, const u8 condition)
319{
320 u8 status;
321 unsigned long timeout;
322 int ret = 0;
323
324 timeout = jiffies + HZ;
325 while (1) {
326 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
327 if (ret)
328 return ret;
329 if ((status & condition) == condition)
330 break;
331 if (time_after(jiffies, timeout))
332 return -ETIMEDOUT;
333 mdelay(1);
334 }
335 return ret;
336}
337
318static int if_sdio_card_to_host(struct if_sdio_card *card) 338static int if_sdio_card_to_host(struct if_sdio_card *card)
319{ 339{
320 int ret; 340 int ret;
321 u8 status;
322 u16 size, type, chunk; 341 u16 size, type, chunk;
323 unsigned long timeout;
324 342
325 lbs_deb_enter(LBS_DEB_SDIO); 343 lbs_deb_enter(LBS_DEB_SDIO);
326 344
@@ -335,19 +353,9 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
335 goto out; 353 goto out;
336 } 354 }
337 355
338 timeout = jiffies + HZ; 356 ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
339 while (1) { 357 if (ret)
340 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 358 goto out;
341 if (ret)
342 goto out;
343 if (status & IF_SDIO_IO_RDY)
344 break;
345 if (time_after(jiffies, timeout)) {
346 ret = -ETIMEDOUT;
347 goto out;
348 }
349 mdelay(1);
350 }
351 359
352 /* 360 /*
353 * The transfer must be in one transaction or the firmware 361 * The transfer must be in one transaction or the firmware
@@ -414,8 +422,6 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
414{ 422{
415 struct if_sdio_card *card; 423 struct if_sdio_card *card;
416 struct if_sdio_packet *packet; 424 struct if_sdio_packet *packet;
417 unsigned long timeout;
418 u8 status;
419 int ret; 425 int ret;
420 unsigned long flags; 426 unsigned long flags;
421 427
@@ -435,25 +441,15 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
435 441
436 sdio_claim_host(card->func); 442 sdio_claim_host(card->func);
437 443
438 timeout = jiffies + HZ; 444 ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
439 while (1) { 445 if (ret == 0) {
440 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 446 ret = sdio_writesb(card->func, card->ioport,
441 if (ret) 447 packet->buffer, packet->nb);
442 goto release;
443 if (status & IF_SDIO_IO_RDY)
444 break;
445 if (time_after(jiffies, timeout)) {
446 ret = -ETIMEDOUT;
447 goto release;
448 }
449 mdelay(1);
450 } 448 }
451 449
452 ret = sdio_writesb(card->func, card->ioport,
453 packet->buffer, packet->nb);
454 if (ret) 450 if (ret)
455 goto release; 451 lbs_pr_err("error %d sending packet to firmware\n", ret);
456release: 452
457 sdio_release_host(card->func); 453 sdio_release_host(card->func);
458 454
459 kfree(packet); 455 kfree(packet);
@@ -466,10 +462,11 @@ release:
466/* Firmware */ 462/* Firmware */
467/********************************************************************/ 463/********************************************************************/
468 464
465#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
466
469static int if_sdio_prog_helper(struct if_sdio_card *card) 467static int if_sdio_prog_helper(struct if_sdio_card *card)
470{ 468{
471 int ret; 469 int ret;
472 u8 status;
473 const struct firmware *fw; 470 const struct firmware *fw;
474 unsigned long timeout; 471 unsigned long timeout;
475 u8 *chunk_buffer; 472 u8 *chunk_buffer;
@@ -501,20 +498,14 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
501 size = fw->size; 498 size = fw->size;
502 499
503 while (size) { 500 while (size) {
504 timeout = jiffies + HZ; 501 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
505 while (1) { 502 if (ret)
506 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 503 goto release;
507 if (ret) 504
508 goto release; 505 /* On some platforms (like Davinci) the chip needs more time
509 if ((status & IF_SDIO_IO_RDY) && 506 * between helper blocks.
510 (status & IF_SDIO_DL_RDY)) 507 */
511 break; 508 mdelay(2);
512 if (time_after(jiffies, timeout)) {
513 ret = -ETIMEDOUT;
514 goto release;
515 }
516 mdelay(1);
517 }
518 509
519 chunk_size = min(size, (size_t)60); 510 chunk_size = min(size, (size_t)60);
520 511
@@ -584,7 +575,6 @@ out:
584static int if_sdio_prog_real(struct if_sdio_card *card) 575static int if_sdio_prog_real(struct if_sdio_card *card)
585{ 576{
586 int ret; 577 int ret;
587 u8 status;
588 const struct firmware *fw; 578 const struct firmware *fw;
589 unsigned long timeout; 579 unsigned long timeout;
590 u8 *chunk_buffer; 580 u8 *chunk_buffer;
@@ -616,20 +606,9 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
616 size = fw->size; 606 size = fw->size;
617 607
618 while (size) { 608 while (size) {
619 timeout = jiffies + HZ; 609 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
620 while (1) { 610 if (ret)
621 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 611 goto release;
622 if (ret)
623 goto release;
624 if ((status & IF_SDIO_IO_RDY) &&
625 (status & IF_SDIO_DL_RDY))
626 break;
627 if (time_after(jiffies, timeout)) {
628 ret = -ETIMEDOUT;
629 goto release;
630 }
631 mdelay(1);
632 }
633 612
634 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret); 613 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
635 if (ret) 614 if (ret)
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index b620daf59ef7..8945afd6ce3e 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
10#include <linux/slab.h> 12#include <linux/slab.h>
11 13
12#include "libertas_tf.h" 14#include "libertas_tf.h"
@@ -82,6 +84,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
82 int ret = -1; 84 int ret = -1;
83 u32 i; 85 u32 i;
84 86
87 lbtf_deb_enter(LBTF_DEB_CMD);
88
85 memset(&cmd, 0, sizeof(cmd)); 89 memset(&cmd, 0, sizeof(cmd));
86 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 90 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
87 memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN); 91 memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -104,6 +108,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
104 priv->fwrelease >> 8 & 0xff, 108 priv->fwrelease >> 8 & 0xff,
105 priv->fwrelease & 0xff, 109 priv->fwrelease & 0xff,
106 priv->fwcapinfo); 110 priv->fwcapinfo);
111 lbtf_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
112 cmd.hwifversion, cmd.version);
107 113
108 /* Clamp region code to 8-bit since FW spec indicates that it should 114 /* Clamp region code to 8-bit since FW spec indicates that it should
109 * only ever be 8-bit, even though the field size is 16-bit. Some 115 * only ever be 8-bit, even though the field size is 16-bit. Some
@@ -118,8 +124,10 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
118 } 124 }
119 125
120 /* if it's unidentified region code, use the default (USA) */ 126 /* if it's unidentified region code, use the default (USA) */
121 if (i >= MRVDRV_MAX_REGION_CODE) 127 if (i >= MRVDRV_MAX_REGION_CODE) {
122 priv->regioncode = 0x10; 128 priv->regioncode = 0x10;
129 pr_info("unidentified region code; using the default (USA)\n");
130 }
123 131
124 if (priv->current_addr[0] == 0xff) 132 if (priv->current_addr[0] == 0xff)
125 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); 133 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
@@ -128,6 +136,7 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
128 136
129 lbtf_geo_init(priv); 137 lbtf_geo_init(priv);
130out: 138out:
139 lbtf_deb_leave(LBTF_DEB_CMD);
131 return ret; 140 return ret;
132} 141}
133 142
@@ -141,13 +150,18 @@ out:
141 */ 150 */
142int lbtf_set_channel(struct lbtf_private *priv, u8 channel) 151int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
143{ 152{
153 int ret = 0;
144 struct cmd_ds_802_11_rf_channel cmd; 154 struct cmd_ds_802_11_rf_channel cmd;
145 155
156 lbtf_deb_enter(LBTF_DEB_CMD);
157
146 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 158 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
147 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET); 159 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
148 cmd.channel = cpu_to_le16(channel); 160 cmd.channel = cpu_to_le16(channel);
149 161
150 return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd); 162 ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
163 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
164 return ret;
151} 165}
152 166
153int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon) 167int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
@@ -155,20 +169,28 @@ int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
155 struct cmd_ds_802_11_beacon_set cmd; 169 struct cmd_ds_802_11_beacon_set cmd;
156 int size; 170 int size;
157 171
158 if (beacon->len > MRVL_MAX_BCN_SIZE) 172 lbtf_deb_enter(LBTF_DEB_CMD);
173
174 if (beacon->len > MRVL_MAX_BCN_SIZE) {
175 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", -1);
159 return -1; 176 return -1;
177 }
160 size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len; 178 size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
161 cmd.hdr.size = cpu_to_le16(size); 179 cmd.hdr.size = cpu_to_le16(size);
162 cmd.len = cpu_to_le16(beacon->len); 180 cmd.len = cpu_to_le16(beacon->len);
163 memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len); 181 memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
164 182
165 lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size); 183 lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
184
185 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", 0);
166 return 0; 186 return 0;
167} 187}
168 188
169int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable, 189int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
170 int beacon_int) { 190 int beacon_int)
191{
171 struct cmd_ds_802_11_beacon_control cmd; 192 struct cmd_ds_802_11_beacon_control cmd;
193 lbtf_deb_enter(LBTF_DEB_CMD);
172 194
173 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 195 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
174 cmd.action = cpu_to_le16(CMD_ACT_SET); 196 cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -176,6 +198,8 @@ int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
176 cmd.beacon_period = cpu_to_le16(beacon_int); 198 cmd.beacon_period = cpu_to_le16(beacon_int);
177 199
178 lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd)); 200 lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
201
202 lbtf_deb_leave(LBTF_DEB_CMD);
179 return 0; 203 return 0;
180} 204}
181 205
@@ -183,17 +207,28 @@ static void lbtf_queue_cmd(struct lbtf_private *priv,
183 struct cmd_ctrl_node *cmdnode) 207 struct cmd_ctrl_node *cmdnode)
184{ 208{
185 unsigned long flags; 209 unsigned long flags;
210 lbtf_deb_enter(LBTF_DEB_HOST);
186 211
187 if (!cmdnode) 212 if (!cmdnode) {
188 return; 213 lbtf_deb_host("QUEUE_CMD: cmdnode is NULL\n");
214 goto qcmd_done;
215 }
189 216
190 if (!cmdnode->cmdbuf->size) 217 if (!cmdnode->cmdbuf->size) {
191 return; 218 lbtf_deb_host("DNLD_CMD: cmd size is zero\n");
219 goto qcmd_done;
220 }
192 221
193 cmdnode->result = 0; 222 cmdnode->result = 0;
194 spin_lock_irqsave(&priv->driver_lock, flags); 223 spin_lock_irqsave(&priv->driver_lock, flags);
195 list_add_tail(&cmdnode->list, &priv->cmdpendingq); 224 list_add_tail(&cmdnode->list, &priv->cmdpendingq);
196 spin_unlock_irqrestore(&priv->driver_lock, flags); 225 spin_unlock_irqrestore(&priv->driver_lock, flags);
226
227 lbtf_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
228 le16_to_cpu(cmdnode->cmdbuf->command));
229
230qcmd_done:
231 lbtf_deb_leave(LBTF_DEB_HOST);
197} 232}
198 233
199static void lbtf_submit_command(struct lbtf_private *priv, 234static void lbtf_submit_command(struct lbtf_private *priv,
@@ -206,22 +241,33 @@ static void lbtf_submit_command(struct lbtf_private *priv,
206 int timeo = 5 * HZ; 241 int timeo = 5 * HZ;
207 int ret; 242 int ret;
208 243
244 lbtf_deb_enter(LBTF_DEB_HOST);
245
209 cmd = cmdnode->cmdbuf; 246 cmd = cmdnode->cmdbuf;
210 247
211 spin_lock_irqsave(&priv->driver_lock, flags); 248 spin_lock_irqsave(&priv->driver_lock, flags);
212 priv->cur_cmd = cmdnode; 249 priv->cur_cmd = cmdnode;
213 cmdsize = le16_to_cpu(cmd->size); 250 cmdsize = le16_to_cpu(cmd->size);
214 command = le16_to_cpu(cmd->command); 251 command = le16_to_cpu(cmd->command);
252
253 lbtf_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
254 command, le16_to_cpu(cmd->seqnum), cmdsize);
255 lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
256
215 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); 257 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
216 spin_unlock_irqrestore(&priv->driver_lock, flags); 258 spin_unlock_irqrestore(&priv->driver_lock, flags);
217 259
218 if (ret) 260 if (ret) {
261 pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
219 /* Let the timer kick in and retry, and potentially reset 262 /* Let the timer kick in and retry, and potentially reset
220 the whole thing if the condition persists */ 263 the whole thing if the condition persists */
221 timeo = HZ; 264 timeo = HZ;
265 }
222 266
223 /* Setup the timer after transmit command */ 267 /* Setup the timer after transmit command */
224 mod_timer(&priv->command_timer, jiffies + timeo); 268 mod_timer(&priv->command_timer, jiffies + timeo);
269
270 lbtf_deb_leave(LBTF_DEB_HOST);
225} 271}
226 272
227/** 273/**
@@ -231,8 +277,10 @@ static void lbtf_submit_command(struct lbtf_private *priv,
231static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, 277static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
232 struct cmd_ctrl_node *cmdnode) 278 struct cmd_ctrl_node *cmdnode)
233{ 279{
280 lbtf_deb_enter(LBTF_DEB_HOST);
281
234 if (!cmdnode) 282 if (!cmdnode)
235 return; 283 goto cl_ins_out;
236 284
237 cmdnode->callback = NULL; 285 cmdnode->callback = NULL;
238 cmdnode->callback_arg = 0; 286 cmdnode->callback_arg = 0;
@@ -240,6 +288,9 @@ static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
240 memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE); 288 memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
241 289
242 list_add_tail(&cmdnode->list, &priv->cmdfreeq); 290 list_add_tail(&cmdnode->list, &priv->cmdfreeq);
291
292cl_ins_out:
293 lbtf_deb_leave(LBTF_DEB_HOST);
243} 294}
244 295
245static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, 296static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
@@ -268,29 +319,41 @@ int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
268{ 319{
269 struct cmd_ds_mac_multicast_addr cmd; 320 struct cmd_ds_mac_multicast_addr cmd;
270 321
322 lbtf_deb_enter(LBTF_DEB_CMD);
323
271 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 324 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
272 cmd.action = cpu_to_le16(CMD_ACT_SET); 325 cmd.action = cpu_to_le16(CMD_ACT_SET);
273 326
274 cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr); 327 cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
328
329 lbtf_deb_cmd("MULTICAST_ADR: setting %d addresses\n", cmd.nr_of_adrs);
330
275 memcpy(cmd.maclist, priv->multicastlist, 331 memcpy(cmd.maclist, priv->multicastlist,
276 priv->nr_of_multicastmacaddr * ETH_ALEN); 332 priv->nr_of_multicastmacaddr * ETH_ALEN);
277 333
278 lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd)); 334 lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
335
336 lbtf_deb_leave(LBTF_DEB_CMD);
279 return 0; 337 return 0;
280} 338}
281 339
282void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode) 340void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
283{ 341{
284 struct cmd_ds_set_mode cmd; 342 struct cmd_ds_set_mode cmd;
343 lbtf_deb_enter(LBTF_DEB_WEXT);
285 344
286 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 345 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
287 cmd.mode = cpu_to_le16(mode); 346 cmd.mode = cpu_to_le16(mode);
347 lbtf_deb_wext("Switching to mode: 0x%x\n", mode);
288 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd)); 348 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
349
350 lbtf_deb_leave(LBTF_DEB_WEXT);
289} 351}
290 352
291void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid) 353void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
292{ 354{
293 struct cmd_ds_set_bssid cmd; 355 struct cmd_ds_set_bssid cmd;
356 lbtf_deb_enter(LBTF_DEB_CMD);
294 357
295 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 358 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
296 cmd.activate = activate ? 1 : 0; 359 cmd.activate = activate ? 1 : 0;
@@ -298,11 +361,13 @@ void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
298 memcpy(cmd.bssid, bssid, ETH_ALEN); 361 memcpy(cmd.bssid, bssid, ETH_ALEN);
299 362
300 lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd)); 363 lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
364 lbtf_deb_leave(LBTF_DEB_CMD);
301} 365}
302 366
303int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr) 367int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
304{ 368{
305 struct cmd_ds_802_11_mac_address cmd; 369 struct cmd_ds_802_11_mac_address cmd;
370 lbtf_deb_enter(LBTF_DEB_CMD);
306 371
307 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 372 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
308 cmd.action = cpu_to_le16(CMD_ACT_SET); 373 cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -310,6 +375,7 @@ int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
310 memcpy(cmd.macadd, mac_addr, ETH_ALEN); 375 memcpy(cmd.macadd, mac_addr, ETH_ALEN);
311 376
312 lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd)); 377 lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
378 lbtf_deb_leave(LBTF_DEB_CMD);
313 return 0; 379 return 0;
314} 380}
315 381
@@ -318,6 +384,8 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
318 int ret = 0; 384 int ret = 0;
319 struct cmd_ds_802_11_radio_control cmd; 385 struct cmd_ds_802_11_radio_control cmd;
320 386
387 lbtf_deb_enter(LBTF_DEB_CMD);
388
321 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 389 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
322 cmd.action = cpu_to_le16(CMD_ACT_SET); 390 cmd.action = cpu_to_le16(CMD_ACT_SET);
323 391
@@ -341,19 +409,28 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
341 else 409 else
342 cmd.control &= cpu_to_le16(~TURN_ON_RF); 410 cmd.control &= cpu_to_le16(~TURN_ON_RF);
343 411
412 lbtf_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
413 priv->preamble);
414
344 ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); 415 ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
416
417 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
345 return ret; 418 return ret;
346} 419}
347 420
348void lbtf_set_mac_control(struct lbtf_private *priv) 421void lbtf_set_mac_control(struct lbtf_private *priv)
349{ 422{
350 struct cmd_ds_mac_control cmd; 423 struct cmd_ds_mac_control cmd;
424 lbtf_deb_enter(LBTF_DEB_CMD);
425
351 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 426 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
352 cmd.action = cpu_to_le16(priv->mac_control); 427 cmd.action = cpu_to_le16(priv->mac_control);
353 cmd.reserved = 0; 428 cmd.reserved = 0;
354 429
355 lbtf_cmd_async(priv, CMD_MAC_CONTROL, 430 lbtf_cmd_async(priv, CMD_MAC_CONTROL,
356 &cmd.hdr, sizeof(cmd)); 431 &cmd.hdr, sizeof(cmd));
432
433 lbtf_deb_leave(LBTF_DEB_CMD);
357} 434}
358 435
359/** 436/**
@@ -365,29 +442,43 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
365 */ 442 */
366int lbtf_allocate_cmd_buffer(struct lbtf_private *priv) 443int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
367{ 444{
445 int ret = 0;
368 u32 bufsize; 446 u32 bufsize;
369 u32 i; 447 u32 i;
370 struct cmd_ctrl_node *cmdarray; 448 struct cmd_ctrl_node *cmdarray;
371 449
450 lbtf_deb_enter(LBTF_DEB_HOST);
451
372 /* Allocate and initialize the command array */ 452 /* Allocate and initialize the command array */
373 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS; 453 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
374 cmdarray = kzalloc(bufsize, GFP_KERNEL); 454 cmdarray = kzalloc(bufsize, GFP_KERNEL);
375 if (!cmdarray) 455 if (!cmdarray) {
376 return -1; 456 lbtf_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n");
457 ret = -1;
458 goto done;
459 }
377 priv->cmd_array = cmdarray; 460 priv->cmd_array = cmdarray;
378 461
379 /* Allocate and initialize each command buffer in the command array */ 462 /* Allocate and initialize each command buffer in the command array */
380 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { 463 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
381 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL); 464 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
382 if (!cmdarray[i].cmdbuf) 465 if (!cmdarray[i].cmdbuf) {
383 return -1; 466 lbtf_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
467 ret = -1;
468 goto done;
469 }
384 } 470 }
385 471
386 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { 472 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
387 init_waitqueue_head(&cmdarray[i].cmdwait_q); 473 init_waitqueue_head(&cmdarray[i].cmdwait_q);
388 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]); 474 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
389 } 475 }
390 return 0; 476
477 ret = 0;
478
479done:
480 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
481 return ret;
391} 482}
392 483
393/** 484/**
@@ -402,9 +493,13 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
402 struct cmd_ctrl_node *cmdarray; 493 struct cmd_ctrl_node *cmdarray;
403 unsigned int i; 494 unsigned int i;
404 495
496 lbtf_deb_enter(LBTF_DEB_HOST);
497
405 /* need to check if cmd array is allocated or not */ 498 /* need to check if cmd array is allocated or not */
406 if (priv->cmd_array == NULL) 499 if (priv->cmd_array == NULL) {
407 return 0; 500 lbtf_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
501 goto done;
502 }
408 503
409 cmdarray = priv->cmd_array; 504 cmdarray = priv->cmd_array;
410 505
@@ -418,6 +513,8 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
418 kfree(priv->cmd_array); 513 kfree(priv->cmd_array);
419 priv->cmd_array = NULL; 514 priv->cmd_array = NULL;
420 515
516done:
517 lbtf_deb_leave(LBTF_DEB_HOST);
421 return 0; 518 return 0;
422} 519}
423 520
@@ -433,6 +530,8 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
433 struct cmd_ctrl_node *tempnode; 530 struct cmd_ctrl_node *tempnode;
434 unsigned long flags; 531 unsigned long flags;
435 532
533 lbtf_deb_enter(LBTF_DEB_HOST);
534
436 if (!priv) 535 if (!priv)
437 return NULL; 536 return NULL;
438 537
@@ -442,11 +541,14 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
442 tempnode = list_first_entry(&priv->cmdfreeq, 541 tempnode = list_first_entry(&priv->cmdfreeq,
443 struct cmd_ctrl_node, list); 542 struct cmd_ctrl_node, list);
444 list_del(&tempnode->list); 543 list_del(&tempnode->list);
445 } else 544 } else {
545 lbtf_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
446 tempnode = NULL; 546 tempnode = NULL;
547 }
447 548
448 spin_unlock_irqrestore(&priv->driver_lock, flags); 549 spin_unlock_irqrestore(&priv->driver_lock, flags);
449 550
551 lbtf_deb_leave(LBTF_DEB_HOST);
450 return tempnode; 552 return tempnode;
451} 553}
452 554
@@ -462,16 +564,20 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
462 struct cmd_ctrl_node *cmdnode = NULL; 564 struct cmd_ctrl_node *cmdnode = NULL;
463 struct cmd_header *cmd; 565 struct cmd_header *cmd;
464 unsigned long flags; 566 unsigned long flags;
567 int ret = 0;
465 568
466 /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the 569 /* Debug group is lbtf_deb_THREAD and not lbtf_deb_HOST, because the
467 * only caller to us is lbtf_thread() and we get even when a 570 * only caller to us is lbtf_thread() and we get even when a
468 * data packet is received */ 571 * data packet is received */
572 lbtf_deb_enter(LBTF_DEB_THREAD);
469 573
470 spin_lock_irqsave(&priv->driver_lock, flags); 574 spin_lock_irqsave(&priv->driver_lock, flags);
471 575
472 if (priv->cur_cmd) { 576 if (priv->cur_cmd) {
577 pr_alert("EXEC_NEXT_CMD: already processing command!\n");
473 spin_unlock_irqrestore(&priv->driver_lock, flags); 578 spin_unlock_irqrestore(&priv->driver_lock, flags);
474 return -1; 579 ret = -1;
580 goto done;
475 } 581 }
476 582
477 if (!list_empty(&priv->cmdpendingq)) { 583 if (!list_empty(&priv->cmdpendingq)) {
@@ -483,11 +589,17 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
483 cmd = cmdnode->cmdbuf; 589 cmd = cmdnode->cmdbuf;
484 590
485 list_del(&cmdnode->list); 591 list_del(&cmdnode->list);
592 lbtf_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
593 le16_to_cpu(cmd->command));
486 spin_unlock_irqrestore(&priv->driver_lock, flags); 594 spin_unlock_irqrestore(&priv->driver_lock, flags);
487 lbtf_submit_command(priv, cmdnode); 595 lbtf_submit_command(priv, cmdnode);
488 } else 596 } else
489 spin_unlock_irqrestore(&priv->driver_lock, flags); 597 spin_unlock_irqrestore(&priv->driver_lock, flags);
490 return 0; 598
599 ret = 0;
600done:
601 lbtf_deb_leave(LBTF_DEB_THREAD);
602 return ret;
491} 603}
492 604
493static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv, 605static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
@@ -498,14 +610,22 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
498{ 610{
499 struct cmd_ctrl_node *cmdnode; 611 struct cmd_ctrl_node *cmdnode;
500 612
501 if (priv->surpriseremoved) 613 lbtf_deb_enter(LBTF_DEB_HOST);
502 return ERR_PTR(-ENOENT); 614
615 if (priv->surpriseremoved) {
616 lbtf_deb_host("PREP_CMD: card removed\n");
617 cmdnode = ERR_PTR(-ENOENT);
618 goto done;
619 }
503 620
504 cmdnode = lbtf_get_cmd_ctrl_node(priv); 621 cmdnode = lbtf_get_cmd_ctrl_node(priv);
505 if (cmdnode == NULL) { 622 if (cmdnode == NULL) {
623 lbtf_deb_host("PREP_CMD: cmdnode is NULL\n");
624
506 /* Wake up main thread to execute next command */ 625 /* Wake up main thread to execute next command */
507 queue_work(lbtf_wq, &priv->cmd_work); 626 queue_work(lbtf_wq, &priv->cmd_work);
508 return ERR_PTR(-ENOBUFS); 627 cmdnode = ERR_PTR(-ENOBUFS);
628 goto done;
509 } 629 }
510 630
511 cmdnode->callback = callback; 631 cmdnode->callback = callback;
@@ -520,17 +640,24 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
520 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); 640 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
521 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum); 641 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
522 cmdnode->cmdbuf->result = 0; 642 cmdnode->cmdbuf->result = 0;
643
644 lbtf_deb_host("PREP_CMD: command 0x%04x\n", command);
645
523 cmdnode->cmdwaitqwoken = 0; 646 cmdnode->cmdwaitqwoken = 0;
524 lbtf_queue_cmd(priv, cmdnode); 647 lbtf_queue_cmd(priv, cmdnode);
525 queue_work(lbtf_wq, &priv->cmd_work); 648 queue_work(lbtf_wq, &priv->cmd_work);
526 649
650 done:
651 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %p", cmdnode);
527 return cmdnode; 652 return cmdnode;
528} 653}
529 654
530void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command, 655void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
531 struct cmd_header *in_cmd, int in_cmd_size) 656 struct cmd_header *in_cmd, int in_cmd_size)
532{ 657{
658 lbtf_deb_enter(LBTF_DEB_CMD);
533 __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0); 659 __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
660 lbtf_deb_leave(LBTF_DEB_CMD);
534} 661}
535 662
536int __lbtf_cmd(struct lbtf_private *priv, uint16_t command, 663int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
@@ -543,30 +670,35 @@ int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
543 unsigned long flags; 670 unsigned long flags;
544 int ret = 0; 671 int ret = 0;
545 672
673 lbtf_deb_enter(LBTF_DEB_HOST);
674
546 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, 675 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
547 callback, callback_arg); 676 callback, callback_arg);
548 if (IS_ERR(cmdnode)) 677 if (IS_ERR(cmdnode)) {
549 return PTR_ERR(cmdnode); 678 ret = PTR_ERR(cmdnode);
679 goto done;
680 }
550 681
551 might_sleep(); 682 might_sleep();
552 ret = wait_event_interruptible(cmdnode->cmdwait_q, 683 ret = wait_event_interruptible(cmdnode->cmdwait_q,
553 cmdnode->cmdwaitqwoken); 684 cmdnode->cmdwaitqwoken);
554 if (ret) { 685 if (ret) {
555 printk(KERN_DEBUG 686 pr_info("PREP_CMD: command 0x%04x interrupted by signal: %d\n",
556 "libertastf: command 0x%04x interrupted by signal", 687 command, ret);
557 command); 688 goto done;
558 return ret;
559 } 689 }
560 690
561 spin_lock_irqsave(&priv->driver_lock, flags); 691 spin_lock_irqsave(&priv->driver_lock, flags);
562 ret = cmdnode->result; 692 ret = cmdnode->result;
563 if (ret) 693 if (ret)
564 printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n", 694 pr_info("PREP_CMD: command 0x%04x failed: %d\n",
565 command, ret); 695 command, ret);
566 696
567 __lbtf_cleanup_and_insert_cmd(priv, cmdnode); 697 __lbtf_cleanup_and_insert_cmd(priv, cmdnode);
568 spin_unlock_irqrestore(&priv->driver_lock, flags); 698 spin_unlock_irqrestore(&priv->driver_lock, flags);
569 699
700done:
701 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
570 return ret; 702 return ret;
571} 703}
572EXPORT_SYMBOL_GPL(__lbtf_cmd); 704EXPORT_SYMBOL_GPL(__lbtf_cmd);
@@ -587,6 +719,8 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
587 unsigned long flags; 719 unsigned long flags;
588 uint16_t result; 720 uint16_t result;
589 721
722 lbtf_deb_enter(LBTF_DEB_CMD);
723
590 mutex_lock(&priv->lock); 724 mutex_lock(&priv->lock);
591 spin_lock_irqsave(&priv->driver_lock, flags); 725 spin_lock_irqsave(&priv->driver_lock, flags);
592 726
@@ -602,7 +736,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
602 result = le16_to_cpu(resp->result); 736 result = le16_to_cpu(resp->result);
603 737
604 if (net_ratelimit()) 738 if (net_ratelimit())
605 printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n", 739 pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
606 respcmd, le16_to_cpu(resp->seqnum), 740 respcmd, le16_to_cpu(resp->seqnum),
607 le16_to_cpu(resp->size)); 741 le16_to_cpu(resp->size));
608 742
@@ -639,7 +773,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
639 switch (respcmd) { 773 switch (respcmd) {
640 case CMD_RET(CMD_GET_HW_SPEC): 774 case CMD_RET(CMD_GET_HW_SPEC):
641 case CMD_RET(CMD_802_11_RESET): 775 case CMD_RET(CMD_802_11_RESET):
642 printk(KERN_DEBUG "libertastf: reset failed\n"); 776 pr_info("libertastf: reset failed\n");
643 break; 777 break;
644 778
645 } 779 }
@@ -666,5 +800,6 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
666 800
667done: 801done:
668 mutex_unlock(&priv->lock); 802 mutex_unlock(&priv->lock);
803 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
669 return ret; 804 return ret;
670} 805}
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
new file mode 100644
index 000000000000..ae753962d8b5
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -0,0 +1,104 @@
1/**
2 * This header file contains global constant/enum definitions,
3 * global variable declaration.
4 */
5#ifndef _LBS_DEB_DEFS_H_
6#define _LBS_DEB_EFS_H_
7
8#ifndef DRV_NAME
9#define DRV_NAME "libertas_tf"
10#endif
11
12#include <linux/spinlock.h>
13
14#ifdef CONFIG_LIBERTAS_THINFIRM_DEBUG
15#define DEBUG
16#define PROC_DEBUG
17#endif
18
19#define LBTF_DEB_ENTER 0x00000001
20#define LBTF_DEB_LEAVE 0x00000002
21#define LBTF_DEB_MAIN 0x00000004
22#define LBTF_DEB_NET 0x00000008
23#define LBTF_DEB_MESH 0x00000010
24#define LBTF_DEB_WEXT 0x00000020
25#define LBTF_DEB_IOCTL 0x00000040
26#define LBTF_DEB_SCAN 0x00000080
27#define LBTF_DEB_ASSOC 0x00000100
28#define LBTF_DEB_JOIN 0x00000200
29#define LBTF_DEB_11D 0x00000400
30#define LBTF_DEB_DEBUGFS 0x00000800
31#define LBTF_DEB_ETHTOOL 0x00001000
32#define LBTF_DEB_HOST 0x00002000
33#define LBTF_DEB_CMD 0x00004000
34#define LBTF_DEB_RX 0x00008000
35#define LBTF_DEB_TX 0x00010000
36#define LBTF_DEB_USB 0x00020000
37#define LBTF_DEB_CS 0x00040000
38#define LBTF_DEB_FW 0x00080000
39#define LBTF_DEB_THREAD 0x00100000
40#define LBTF_DEB_HEX 0x00200000
41#define LBTF_DEB_SDIO 0x00400000
42#define LBTF_DEB_MACOPS 0x00800000
43
44extern unsigned int lbtf_debug;
45
46
47#ifdef DEBUG
48#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
49do { if ((lbtf_debug & (grp)) == (grp)) \
50 printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
51 in_interrupt() ? " (INT)" : "", ## args); } while (0)
52#else
53#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
54#endif
55
56#define lbtf_deb_enter(grp) \
57 LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s()\n", __func__);
58#define lbtf_deb_enter_args(grp, fmt, args...) \
59 LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
60#define lbtf_deb_leave(grp) \
61 LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s()\n", __func__);
62#define lbtf_deb_leave_args(grp, fmt, args...) \
63 LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
64 __func__, ##args);
65#define lbtf_deb_main(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MAIN, " main", fmt, ##args)
66#define lbtf_deb_net(fmt, args...) LBTF_DEB_LL(LBTF_DEB_NET, " net", fmt, ##args)
67#define lbtf_deb_mesh(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MESH, " mesh", fmt, ##args)
68#define lbtf_deb_wext(fmt, args...) LBTF_DEB_LL(LBTF_DEB_WEXT, " wext", fmt, ##args)
69#define lbtf_deb_ioctl(fmt, args...) LBTF_DEB_LL(LBTF_DEB_IOCTL, " ioctl", fmt, ##args)
70#define lbtf_deb_scan(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SCAN, " scan", fmt, ##args)
71#define lbtf_deb_assoc(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ASSOC, " assoc", fmt, ##args)
72#define lbtf_deb_join(fmt, args...) LBTF_DEB_LL(LBTF_DEB_JOIN, " join", fmt, ##args)
73#define lbtf_deb_11d(fmt, args...) LBTF_DEB_LL(LBTF_DEB_11D, " 11d", fmt, ##args)
74#define lbtf_deb_debugfs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_DEBUGFS, " debugfs", fmt, ##args)
75#define lbtf_deb_ethtool(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ETHTOOL, " ethtool", fmt, ##args)
76#define lbtf_deb_host(fmt, args...) LBTF_DEB_LL(LBTF_DEB_HOST, " host", fmt, ##args)
77#define lbtf_deb_cmd(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", fmt, ##args)
78#define lbtf_deb_rx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_RX, " rx", fmt, ##args)
79#define lbtf_deb_tx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_TX, " tx", fmt, ##args)
80#define lbtf_deb_fw(fmt, args...) LBTF_DEB_LL(LBTF_DEB_FW, " fw", fmt, ##args)
81#define lbtf_deb_usb(fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usb", fmt, ##args)
82#define lbtf_deb_usbd(dev, fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
83#define lbtf_deb_cs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CS, " cs", fmt, ##args)
84#define lbtf_deb_thread(fmt, args...) LBTF_DEB_LL(LBTF_DEB_THREAD, " thread", fmt, ##args)
85#define lbtf_deb_sdio(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SDIO, " thread", fmt, ##args)
86#define lbtf_deb_macops(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MACOPS, " thread", fmt, ##args)
87
88#ifdef DEBUG
89static inline void lbtf_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
90{
91 char newprompt[32];
92
93 if (len &&
94 (lbtf_debug & LBTF_DEB_HEX) &&
95 (lbtf_debug & grp)) {
96 snprintf(newprompt, sizeof(newprompt), DRV_NAME " %s: ", prompt);
97 print_hex_dump_bytes(prompt, DUMP_PREFIX_NONE, buf, len);
98 }
99}
100#else
101#define lbtf_deb_hex(grp, prompt, buf, len) do {} while (0)
102#endif
103
104#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 8cc9db60c14b..4412c279ca94 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -7,6 +7,13 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define DRV_NAME "lbtf_usb"
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include "libertas_tf.h"
15#include "if_usb.h"
16
10#include <linux/delay.h> 17#include <linux/delay.h>
11#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
12#include <linux/firmware.h> 19#include <linux/firmware.h>
@@ -14,10 +21,8 @@
14#include <linux/slab.h> 21#include <linux/slab.h>
15#include <linux/usb.h> 22#include <linux/usb.h>
16 23
17#define DRV_NAME "lbtf_usb" 24#define INSANEDEBUG 0
18 25#define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0)
19#include "libertas_tf.h"
20#include "if_usb.h"
21 26
22#define MESSAGE_HEADER_LEN 4 27#define MESSAGE_HEADER_LEN 4
23 28
@@ -53,9 +58,14 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
53 */ 58 */
54static void if_usb_write_bulk_callback(struct urb *urb) 59static void if_usb_write_bulk_callback(struct urb *urb)
55{ 60{
56 if (urb->status != 0) 61 if (urb->status != 0) {
57 printk(KERN_INFO "libertastf: URB in failure status: %d\n", 62 /* print the failure status number for debug */
58 urb->status); 63 pr_info("URB in failure status: %d\n", urb->status);
64 } else {
65 lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n");
66 lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
67 urb->actual_length);
68 }
59} 69}
60 70
61/** 71/**
@@ -65,6 +75,8 @@ static void if_usb_write_bulk_callback(struct urb *urb)
65 */ 75 */
66static void if_usb_free(struct if_usb_card *cardp) 76static void if_usb_free(struct if_usb_card *cardp)
67{ 77{
78 lbtf_deb_enter(LBTF_DEB_USB);
79
68 /* Unlink tx & rx urb */ 80 /* Unlink tx & rx urb */
69 usb_kill_urb(cardp->tx_urb); 81 usb_kill_urb(cardp->tx_urb);
70 usb_kill_urb(cardp->rx_urb); 82 usb_kill_urb(cardp->rx_urb);
@@ -81,6 +93,8 @@ static void if_usb_free(struct if_usb_card *cardp)
81 93
82 kfree(cardp->ep_out_buf); 94 kfree(cardp->ep_out_buf);
83 cardp->ep_out_buf = NULL; 95 cardp->ep_out_buf = NULL;
96
97 lbtf_deb_leave(LBTF_DEB_USB);
84} 98}
85 99
86static void if_usb_setup_firmware(struct lbtf_private *priv) 100static void if_usb_setup_firmware(struct lbtf_private *priv)
@@ -88,23 +102,33 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
88 struct if_usb_card *cardp = priv->card; 102 struct if_usb_card *cardp = priv->card;
89 struct cmd_ds_set_boot2_ver b2_cmd; 103 struct cmd_ds_set_boot2_ver b2_cmd;
90 104
105 lbtf_deb_enter(LBTF_DEB_USB);
106
91 if_usb_submit_rx_urb(cardp); 107 if_usb_submit_rx_urb(cardp);
92 b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd)); 108 b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
93 b2_cmd.action = 0; 109 b2_cmd.action = 0;
94 b2_cmd.version = cardp->boot2_version; 110 b2_cmd.version = cardp->boot2_version;
95 111
96 if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd)) 112 if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
97 printk(KERN_INFO "libertastf: setting boot2 version failed\n"); 113 lbtf_deb_usb("Setting boot2 version failed\n");
114
115 lbtf_deb_leave(LBTF_DEB_USB);
98} 116}
99 117
100static void if_usb_fw_timeo(unsigned long priv) 118static void if_usb_fw_timeo(unsigned long priv)
101{ 119{
102 struct if_usb_card *cardp = (void *)priv; 120 struct if_usb_card *cardp = (void *)priv;
103 121
104 if (!cardp->fwdnldover) 122 lbtf_deb_enter(LBTF_DEB_USB);
123 if (!cardp->fwdnldover) {
105 /* Download timed out */ 124 /* Download timed out */
106 cardp->priv->surpriseremoved = 1; 125 cardp->priv->surpriseremoved = 1;
126 pr_err("Download timed out\n");
127 } else {
128 lbtf_deb_usb("Download complete, no event. Assuming success\n");
129 }
107 wake_up(&cardp->fw_wq); 130 wake_up(&cardp->fw_wq);
131 lbtf_deb_leave(LBTF_DEB_USB);
108} 132}
109 133
110/** 134/**
@@ -125,11 +149,14 @@ static int if_usb_probe(struct usb_interface *intf,
125 struct if_usb_card *cardp; 149 struct if_usb_card *cardp;
126 int i; 150 int i;
127 151
152 lbtf_deb_enter(LBTF_DEB_USB);
128 udev = interface_to_usbdev(intf); 153 udev = interface_to_usbdev(intf);
129 154
130 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); 155 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
131 if (!cardp) 156 if (!cardp) {
157 pr_err("Out of memory allocating private data.\n");
132 goto error; 158 goto error;
159 }
133 160
134 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); 161 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
135 init_waitqueue_head(&cardp->fw_wq); 162 init_waitqueue_head(&cardp->fw_wq);
@@ -137,38 +164,62 @@ static int if_usb_probe(struct usb_interface *intf,
137 cardp->udev = udev; 164 cardp->udev = udev;
138 iface_desc = intf->cur_altsetting; 165 iface_desc = intf->cur_altsetting;
139 166
167 lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
168 " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n",
169 le16_to_cpu(udev->descriptor.bcdUSB),
170 udev->descriptor.bDeviceClass,
171 udev->descriptor.bDeviceSubClass,
172 udev->descriptor.bDeviceProtocol);
173
140 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 174 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
141 endpoint = &iface_desc->endpoint[i].desc; 175 endpoint = &iface_desc->endpoint[i].desc;
142 if (usb_endpoint_is_bulk_in(endpoint)) { 176 if (usb_endpoint_is_bulk_in(endpoint)) {
143 cardp->ep_in_size = 177 cardp->ep_in_size =
144 le16_to_cpu(endpoint->wMaxPacketSize); 178 le16_to_cpu(endpoint->wMaxPacketSize);
145 cardp->ep_in = usb_endpoint_num(endpoint); 179 cardp->ep_in = usb_endpoint_num(endpoint);
180
181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in);
182 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size);
146 } else if (usb_endpoint_is_bulk_out(endpoint)) { 183 } else if (usb_endpoint_is_bulk_out(endpoint)) {
147 cardp->ep_out_size = 184 cardp->ep_out_size =
148 le16_to_cpu(endpoint->wMaxPacketSize); 185 le16_to_cpu(endpoint->wMaxPacketSize);
149 cardp->ep_out = usb_endpoint_num(endpoint); 186 cardp->ep_out = usb_endpoint_num(endpoint);
187
188 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out);
189 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
190 cardp->ep_out_size);
150 } 191 }
151 } 192 }
152 if (!cardp->ep_out_size || !cardp->ep_in_size) 193 if (!cardp->ep_out_size || !cardp->ep_in_size) {
194 lbtf_deb_usbd(&udev->dev, "Endpoints not found\n");
153 /* Endpoints not found */ 195 /* Endpoints not found */
154 goto dealloc; 196 goto dealloc;
197 }
155 198
156 cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 199 cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
157 if (!cardp->rx_urb) 200 if (!cardp->rx_urb) {
201 lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
158 goto dealloc; 202 goto dealloc;
203 }
159 204
160 cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 205 cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
161 if (!cardp->tx_urb) 206 if (!cardp->tx_urb) {
207 lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
162 goto dealloc; 208 goto dealloc;
209 }
163 210
164 cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL); 211 cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
165 if (!cardp->cmd_urb) 212 if (!cardp->cmd_urb) {
213 lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
166 goto dealloc; 214 goto dealloc;
215 }
167 216
168 cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, 217 cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
169 GFP_KERNEL); 218 GFP_KERNEL);
170 if (!cardp->ep_out_buf) 219 if (!cardp->ep_out_buf) {
220 lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n");
171 goto dealloc; 221 goto dealloc;
222 }
172 223
173 priv = lbtf_add_card(cardp, &udev->dev); 224 priv = lbtf_add_card(cardp, &udev->dev);
174 if (!priv) 225 if (!priv)
@@ -189,6 +240,7 @@ static int if_usb_probe(struct usb_interface *intf,
189dealloc: 240dealloc:
190 if_usb_free(cardp); 241 if_usb_free(cardp);
191error: 242error:
243	lbtf_deb_leave(LBTF_DEB_MAIN);
192 return -ENOMEM; 244 return -ENOMEM;
193} 245}
194 246
@@ -202,6 +254,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
202 struct if_usb_card *cardp = usb_get_intfdata(intf); 254 struct if_usb_card *cardp = usb_get_intfdata(intf);
203 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv; 255 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
204 256
257 lbtf_deb_enter(LBTF_DEB_MAIN);
258
205 if_usb_reset_device(cardp); 259 if_usb_reset_device(cardp);
206 260
207 if (priv) 261 if (priv)
@@ -212,6 +266,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
212 266
213 usb_set_intfdata(intf, NULL); 267 usb_set_intfdata(intf, NULL);
214 usb_put_dev(interface_to_usbdev(intf)); 268 usb_put_dev(interface_to_usbdev(intf));
269
270 lbtf_deb_leave(LBTF_DEB_MAIN);
215} 271}
216 272
217/** 273/**
@@ -226,6 +282,8 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
226 struct fwdata *fwdata = cardp->ep_out_buf; 282 struct fwdata *fwdata = cardp->ep_out_buf;
227 u8 *firmware = (u8 *) cardp->fw->data; 283 u8 *firmware = (u8 *) cardp->fw->data;
228 284
285 lbtf_deb_enter(LBTF_DEB_FW);
286
229 /* If we got a CRC failure on the last block, back 287 /* If we got a CRC failure on the last block, back
230 up and retry it */ 288 up and retry it */
231 if (!cardp->CRC_OK) { 289 if (!cardp->CRC_OK) {
@@ -233,6 +291,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
233 cardp->fwseqnum--; 291 cardp->fwseqnum--;
234 } 292 }
235 293
294 lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n",
295 cardp->totalbytes);
296
236 /* struct fwdata (which we sent to the card) has an 297 /* struct fwdata (which we sent to the card) has an
237 extra __le32 field in between the header and the data, 298 extra __le32 field in between the header and the data,
238 which is not in the struct fwheader in the actual 299 which is not in the struct fwheader in the actual
@@ -246,18 +307,33 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
246 memcpy(fwdata->data, &firmware[cardp->totalbytes], 307 memcpy(fwdata->data, &firmware[cardp->totalbytes],
247 le32_to_cpu(fwdata->hdr.datalength)); 308 le32_to_cpu(fwdata->hdr.datalength));
248 309
310 lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n",
311 le32_to_cpu(fwdata->hdr.datalength));
312
249 fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum); 313 fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
250 cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength); 314 cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
251 315
252 usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) + 316 usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
253 le32_to_cpu(fwdata->hdr.datalength), 0); 317 le32_to_cpu(fwdata->hdr.datalength), 0);
254 318
255 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) 319 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
320 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
321 lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n",
322 cardp->fwseqnum, cardp->totalbytes);
323 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
324 lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
325 lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
326
256 /* Host has finished FW downloading 327 /* Host has finished FW downloading
257 * Donwloading FW JUMP BLOCK 328 * Donwloading FW JUMP BLOCK
258 */ 329 */
259 cardp->fwfinalblk = 1; 330 cardp->fwfinalblk = 1;
331 }
260 332
333 lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n",
334 cardp->totalbytes);
335
336 lbtf_deb_leave(LBTF_DEB_FW);
261 return 0; 337 return 0;
262} 338}
263 339
@@ -266,6 +342,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
266 struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4; 342 struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
267 int ret; 343 int ret;
268 344
345 lbtf_deb_enter(LBTF_DEB_USB);
346
269 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 347 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
270 348
271 cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET); 349 cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
@@ -280,6 +358,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
280 ret = usb_reset_device(cardp->udev); 358 ret = usb_reset_device(cardp->udev);
281 msleep(100); 359 msleep(100);
282 360
361 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
362
283 return ret; 363 return ret;
284} 364}
285EXPORT_SYMBOL_GPL(if_usb_reset_device); 365EXPORT_SYMBOL_GPL(if_usb_reset_device);
@@ -297,11 +377,15 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
297static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 377static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
298 uint16_t nb, u8 data) 378 uint16_t nb, u8 data)
299{ 379{
380 int ret = -1;
300 struct urb *urb; 381 struct urb *urb;
301 382
383 lbtf_deb_enter(LBTF_DEB_USB);
302 /* check if device is removed */ 384 /* check if device is removed */
303 if (cardp->priv->surpriseremoved) 385 if (cardp->priv->surpriseremoved) {
304 return -1; 386 lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
387 goto tx_ret;
388 }
305 389
306 if (data) 390 if (data)
307 urb = cardp->tx_urb; 391 urb = cardp->tx_urb;
@@ -315,19 +399,34 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
315 399
316 urb->transfer_flags |= URB_ZERO_PACKET; 400 urb->transfer_flags |= URB_ZERO_PACKET;
317 401
318 if (usb_submit_urb(urb, GFP_ATOMIC)) 402 if (usb_submit_urb(urb, GFP_ATOMIC)) {
319 return -1; 403 lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
320 return 0; 404 goto tx_ret;
405 }
406
407 lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
408
409 ret = 0;
410
411tx_ret:
412 lbtf_deb_leave(LBTF_DEB_USB);
413 return ret;
321} 414}
322 415
323static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, 416static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
324 void (*callbackfn)(struct urb *urb)) 417 void (*callbackfn)(struct urb *urb))
325{ 418{
326 struct sk_buff *skb; 419 struct sk_buff *skb;
420 int ret = -1;
421
422 lbtf_deb_enter(LBTF_DEB_USB);
327 423
328 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); 424 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
329 if (!skb) 425 if (!skb) {
426 pr_err("No free skb\n");
427 lbtf_deb_leave(LBTF_DEB_USB);
330 return -1; 428 return -1;
429 }
331 430
332 cardp->rx_skb = skb; 431 cardp->rx_skb = skb;
333 432
@@ -339,12 +438,19 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
339 438
340 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 439 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
341 440
342 if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) { 441 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
442 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
443 if (ret) {
444 lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
343 kfree_skb(skb); 445 kfree_skb(skb);
344 cardp->rx_skb = NULL; 446 cardp->rx_skb = NULL;
447 lbtf_deb_leave(LBTF_DEB_USB);
345 return -1; 448 return -1;
346 } else 449 } else {
450 lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
451 lbtf_deb_leave(LBTF_DEB_USB);
347 return 0; 452 return 0;
453 }
348} 454}
349 455
350static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp) 456static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
@@ -364,8 +470,12 @@ static void if_usb_receive_fwload(struct urb *urb)
364 struct fwsyncheader *syncfwheader; 470 struct fwsyncheader *syncfwheader;
365 struct bootcmdresp bcmdresp; 471 struct bootcmdresp bcmdresp;
366 472
473 lbtf_deb_enter(LBTF_DEB_USB);
367 if (urb->status) { 474 if (urb->status) {
475 lbtf_deb_usbd(&cardp->udev->dev,
476 "URB status is failed during fw load\n");
368 kfree_skb(skb); 477 kfree_skb(skb);
478 lbtf_deb_leave(LBTF_DEB_USB);
369 return; 479 return;
370 } 480 }
371 481
@@ -373,12 +483,17 @@ static void if_usb_receive_fwload(struct urb *urb)
373 __le32 *tmp = (__le32 *)(skb->data); 483 __le32 *tmp = (__le32 *)(skb->data);
374 484
375 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && 485 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
376 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) 486 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
377 /* Firmware ready event received */ 487 /* Firmware ready event received */
488 pr_info("Firmware ready event received\n");
378 wake_up(&cardp->fw_wq); 489 wake_up(&cardp->fw_wq);
379 else 490 } else {
491 lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
492 le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1]));
380 if_usb_submit_rx_urb_fwload(cardp); 493 if_usb_submit_rx_urb_fwload(cardp);
494 }
381 kfree_skb(skb); 495 kfree_skb(skb);
496 lbtf_deb_leave(LBTF_DEB_USB);
382 return; 497 return;
383 } 498 }
384 if (cardp->bootcmdresp <= 0) { 499 if (cardp->bootcmdresp <= 0) {
@@ -389,34 +504,60 @@ static void if_usb_receive_fwload(struct urb *urb)
389 if_usb_submit_rx_urb_fwload(cardp); 504 if_usb_submit_rx_urb_fwload(cardp);
390 cardp->bootcmdresp = 1; 505 cardp->bootcmdresp = 1;
391 /* Received valid boot command response */ 506 /* Received valid boot command response */
507 lbtf_deb_usbd(&cardp->udev->dev,
508 "Received valid boot command response\n");
509 lbtf_deb_leave(LBTF_DEB_USB);
392 return; 510 return;
393 } 511 }
394 if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) { 512 if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
395 if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) || 513 if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
396 bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || 514 bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
397 bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) 515 bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
516 if (!cardp->bootcmdresp)
517 pr_info("Firmware already seems alive; resetting\n");
398 cardp->bootcmdresp = -1; 518 cardp->bootcmdresp = -1;
399 } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB && 519 } else {
400 bcmdresp.result == BOOT_CMD_RESP_OK) 520 pr_info("boot cmd response wrong magic number (0x%x)\n",
521 le32_to_cpu(bcmdresp.magic));
522 }
523 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
524 pr_info("boot cmd response cmd_tag error (%d)\n",
525 bcmdresp.cmd);
526 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
527 pr_info("boot cmd response result error (%d)\n",
528 bcmdresp.result);
529 } else {
401 cardp->bootcmdresp = 1; 530 cardp->bootcmdresp = 1;
531 lbtf_deb_usbd(&cardp->udev->dev,
532 "Received valid boot command response\n");
533 }
402 534
403 kfree_skb(skb); 535 kfree_skb(skb);
404 if_usb_submit_rx_urb_fwload(cardp); 536 if_usb_submit_rx_urb_fwload(cardp);
537 lbtf_deb_leave(LBTF_DEB_USB);
405 return; 538 return;
406 } 539 }
407 540
408 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); 541 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
409 if (!syncfwheader) { 542 if (!syncfwheader) {
543 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
410 kfree_skb(skb); 544 kfree_skb(skb);
545 lbtf_deb_leave(LBTF_DEB_USB);
411 return; 546 return;
412 } 547 }
413 548
414 memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader)); 549 memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
415 550
416 if (!syncfwheader->cmd) 551 if (!syncfwheader->cmd) {
552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
553 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
554 le32_to_cpu(syncfwheader->seqnum));
417 cardp->CRC_OK = 1; 555 cardp->CRC_OK = 1;
418 else 556 } else {
557 lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n");
419 cardp->CRC_OK = 0; 558 cardp->CRC_OK = 0;
559 }
560
420 kfree_skb(skb); 561 kfree_skb(skb);
421 562
422 /* reschedule timer for 200ms hence */ 563 /* reschedule timer for 200ms hence */
@@ -434,6 +575,7 @@ static void if_usb_receive_fwload(struct urb *urb)
434 575
435 kfree(syncfwheader); 576 kfree(syncfwheader);
436 577
578 lbtf_deb_leave(LBTF_DEB_USB);
437 return; 579 return;
438} 580}
439 581
@@ -445,6 +587,7 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
445{ 587{
446 if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN 588 if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
447 || recvlength < MRVDRV_MIN_PKT_LEN) { 589 || recvlength < MRVDRV_MIN_PKT_LEN) {
590 lbtf_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n");
448 kfree_skb(skb); 591 kfree_skb(skb);
449 return; 592 return;
450 } 593 }
@@ -460,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
460 struct lbtf_private *priv) 603 struct lbtf_private *priv)
461{ 604{
462 if (recvlength > LBS_CMD_BUFFER_SIZE) { 605 if (recvlength > LBS_CMD_BUFFER_SIZE) {
606 lbtf_deb_usbd(&cardp->udev->dev,
607 "The receive buffer is too large\n");
463 kfree_skb(skb); 608 kfree_skb(skb);
464 return; 609 return;
465 } 610 }
@@ -489,16 +634,24 @@ static void if_usb_receive(struct urb *urb)
489 uint32_t recvtype = 0; 634 uint32_t recvtype = 0;
490 __le32 *pkt = (__le32 *) skb->data; 635 __le32 *pkt = (__le32 *) skb->data;
491 636
637 lbtf_deb_enter(LBTF_DEB_USB);
638
492 if (recvlength) { 639 if (recvlength) {
493 if (urb->status) { 640 if (urb->status) {
641 lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
642 urb->status);
494 kfree_skb(skb); 643 kfree_skb(skb);
495 goto setup_for_next; 644 goto setup_for_next;
496 } 645 }
497 646
498 recvbuff = skb->data; 647 recvbuff = skb->data;
499 recvtype = le32_to_cpu(pkt[0]); 648 recvtype = le32_to_cpu(pkt[0]);
649 lbtf_deb_usbd(&cardp->udev->dev,
650 "Recv length = 0x%x, Recv type = 0x%X\n",
651 recvlength, recvtype);
500 } else if (urb->status) { 652 } else if (urb->status) {
501 kfree_skb(skb); 653 kfree_skb(skb);
654 lbtf_deb_leave(LBTF_DEB_USB);
502 return; 655 return;
503 } 656 }
504 657
@@ -515,6 +668,7 @@ static void if_usb_receive(struct urb *urb)
515 { 668 {
516 /* Event cause handling */ 669 /* Event cause handling */
517 u32 event_cause = le32_to_cpu(pkt[1]); 670 u32 event_cause = le32_to_cpu(pkt[1]);
671 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause);
518 672
519 /* Icky undocumented magic special case */ 673 /* Icky undocumented magic special case */
520 if (event_cause & 0xffff0000) { 674 if (event_cause & 0xffff0000) {
@@ -529,21 +683,22 @@ static void if_usb_receive(struct urb *urb)
529 } else if (event_cause == LBTF_EVENT_BCN_SENT) 683 } else if (event_cause == LBTF_EVENT_BCN_SENT)
530 lbtf_bcn_sent(priv); 684 lbtf_bcn_sent(priv);
531 else 685 else
532 printk(KERN_DEBUG 686 lbtf_deb_usbd(&cardp->udev->dev,
533 "Unsupported notification %d received\n", 687 "Unsupported notification %d received\n",
534 event_cause); 688 event_cause);
535 kfree_skb(skb); 689 kfree_skb(skb);
536 break; 690 break;
537 } 691 }
538 default: 692 default:
539 printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n", 693 lbtf_deb_usbd(&cardp->udev->dev,
540 recvtype); 694 "libertastf: unknown command type 0x%X\n", recvtype);
541 kfree_skb(skb); 695 kfree_skb(skb);
542 break; 696 break;
543 } 697 }
544 698
545setup_for_next: 699setup_for_next:
546 if_usb_submit_rx_urb(cardp); 700 if_usb_submit_rx_urb(cardp);
701 lbtf_deb_leave(LBTF_DEB_USB);
547} 702}
548 703
549/** 704/**
@@ -562,6 +717,9 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
562 struct if_usb_card *cardp = priv->card; 717 struct if_usb_card *cardp = priv->card;
563 u8 data = 0; 718 u8 data = 0;
564 719
720 lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
721 lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);
722
565 if (type == MVMS_CMD) { 723 if (type == MVMS_CMD) {
566 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 724 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
567 } else { 725 } else {
@@ -639,8 +797,10 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
639 } while (!exit); 797 } while (!exit);
640 798
641 if (ret) 799 if (ret)
642 printk(KERN_INFO 800 pr_err("firmware file format check FAIL\n");
643 "libertastf: firmware file format check failed\n"); 801 else
802 lbtf_deb_fw("firmware file format check PASS\n");
803
644 return ret; 804 return ret;
645} 805}
646 806
@@ -651,10 +811,12 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
651 static int reset_count = 10; 811 static int reset_count = 10;
652 int ret = 0; 812 int ret = 0;
653 813
814 lbtf_deb_enter(LBTF_DEB_USB);
815
654 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev); 816 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
655 if (ret < 0) { 817 if (ret < 0) {
656 printk(KERN_INFO "libertastf: firmware %s not found\n", 818 pr_err("request_firmware() failed with %#x\n", ret);
657 lbtf_fw_name); 819 pr_err("firmware %s not found\n", lbtf_fw_name);
658 goto done; 820 goto done;
659 } 821 }
660 822
@@ -663,6 +825,7 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
663 825
664restart: 826restart:
665 if (if_usb_submit_rx_urb_fwload(cardp) < 0) { 827 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
828 lbtf_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
666 ret = -1; 829 ret = -1;
667 goto release_fw; 830 goto release_fw;
668 } 831 }
@@ -709,14 +872,13 @@ restart:
709 usb_kill_urb(cardp->rx_urb); 872 usb_kill_urb(cardp->rx_urb);
710 873
711 if (!cardp->fwdnldover) { 874 if (!cardp->fwdnldover) {
712 printk(KERN_INFO "libertastf: failed to load fw," 875 pr_info("failed to load fw, resetting device!\n");
713 " resetting device!\n");
714 if (--reset_count >= 0) { 876 if (--reset_count >= 0) {
715 if_usb_reset_device(cardp); 877 if_usb_reset_device(cardp);
716 goto restart; 878 goto restart;
717 } 879 }
718 880
719 printk(KERN_INFO "libertastf: fw download failure\n"); 881 pr_info("FW download failure, time = %d ms\n", i * 100);
720 ret = -1; 882 ret = -1;
721 goto release_fw; 883 goto release_fw;
722 } 884 }
@@ -730,6 +892,7 @@ restart:
730 if_usb_setup_firmware(cardp->priv); 892 if_usb_setup_firmware(cardp->priv);
731 893
732 done: 894 done:
895 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
733 return ret; 896 return ret;
734} 897}
735EXPORT_SYMBOL_GPL(if_usb_prog_firmware); 898EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
@@ -751,13 +914,19 @@ static int __init if_usb_init_module(void)
751{ 914{
752 int ret = 0; 915 int ret = 0;
753 916
917 lbtf_deb_enter(LBTF_DEB_MAIN);
918
754 ret = usb_register(&if_usb_driver); 919 ret = usb_register(&if_usb_driver);
920
921 lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
755 return ret; 922 return ret;
756} 923}
757 924
758static void __exit if_usb_exit_module(void) 925static void __exit if_usb_exit_module(void)
759{ 926{
927 lbtf_deb_enter(LBTF_DEB_MAIN);
760 usb_deregister(&if_usb_driver); 928 usb_deregister(&if_usb_driver);
929 lbtf_deb_leave(LBTF_DEB_MAIN);
761} 930}
762 931
763module_init(if_usb_init_module); 932module_init(if_usb_init_module);
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 4cc42dd5a005..fbbaaae7a1ae 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -13,6 +13,8 @@
13#include <linux/kthread.h> 13#include <linux/kthread.h>
14#include <net/mac80211.h> 14#include <net/mac80211.h>
15 15
16#include "deb_defs.h"
17
16#ifndef DRV_NAME 18#ifndef DRV_NAME
17#define DRV_NAME "libertas_tf" 19#define DRV_NAME "libertas_tf"
18#endif 20#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7533a23e0500..60787de56f3a 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,10 +7,12 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
10#include <linux/slab.h> 12#include <linux/slab.h>
11 13
14#include <linux/etherdevice.h>
12#include "libertas_tf.h" 15#include "libertas_tf.h"
13#include "linux/etherdevice.h"
14 16
15#define DRIVER_RELEASE_VERSION "004.p0" 17#define DRIVER_RELEASE_VERSION "004.p0"
16/* thinfirm version: 5.132.X.pX */ 18/* thinfirm version: 5.132.X.pX */
@@ -18,7 +20,17 @@
18#define LBTF_FW_VER_MAX 0x0584ffff 20#define LBTF_FW_VER_MAX 0x0584ffff
19#define QOS_CONTROL_LEN 2 21#define QOS_CONTROL_LEN 2
20 22
21static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION; 23/* Module parameters */
24unsigned int lbtf_debug;
25EXPORT_SYMBOL_GPL(lbtf_debug);
26module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
27
28static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
29#ifdef DEBUG
30 "-dbg"
31#endif
32 "";
33
22struct workqueue_struct *lbtf_wq; 34struct workqueue_struct *lbtf_wq;
23 35
24static const struct ieee80211_channel lbtf_channels[] = { 36static const struct ieee80211_channel lbtf_channels[] = {
@@ -81,6 +93,9 @@ static void lbtf_cmd_work(struct work_struct *work)
81{ 93{
82 struct lbtf_private *priv = container_of(work, struct lbtf_private, 94 struct lbtf_private *priv = container_of(work, struct lbtf_private,
83 cmd_work); 95 cmd_work);
96
97 lbtf_deb_enter(LBTF_DEB_CMD);
98
84 spin_lock_irq(&priv->driver_lock); 99 spin_lock_irq(&priv->driver_lock);
85 /* command response? */ 100 /* command response? */
86 if (priv->cmd_response_rxed) { 101 if (priv->cmd_response_rxed) {
@@ -108,11 +123,16 @@ static void lbtf_cmd_work(struct work_struct *work)
108 priv->cmd_timed_out = 0; 123 priv->cmd_timed_out = 0;
109 spin_unlock_irq(&priv->driver_lock); 124 spin_unlock_irq(&priv->driver_lock);
110 125
111 if (!priv->fw_ready) 126 if (!priv->fw_ready) {
127 lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
112 return; 128 return;
129 }
130
113 /* Execute the next command */ 131 /* Execute the next command */
114 if (!priv->cur_cmd) 132 if (!priv->cur_cmd)
115 lbtf_execute_next_command(priv); 133 lbtf_execute_next_command(priv);
134
135 lbtf_deb_leave(LBTF_DEB_CMD);
116} 136}
117 137
118/** 138/**
@@ -126,6 +146,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
126{ 146{
127 int ret = -1; 147 int ret = -1;
128 148
149 lbtf_deb_enter(LBTF_DEB_FW);
129 /* 150 /*
130 * Read priv address from HW 151 * Read priv address from HW
131 */ 152 */
@@ -141,6 +162,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
141 162
142 ret = 0; 163 ret = 0;
143done: 164done:
165 lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
144 return ret; 166 return ret;
145} 167}
146 168
@@ -152,6 +174,7 @@ static void command_timer_fn(unsigned long data)
152{ 174{
153 struct lbtf_private *priv = (struct lbtf_private *)data; 175 struct lbtf_private *priv = (struct lbtf_private *)data;
154 unsigned long flags; 176 unsigned long flags;
177 lbtf_deb_enter(LBTF_DEB_CMD);
155 178
156 spin_lock_irqsave(&priv->driver_lock, flags); 179 spin_lock_irqsave(&priv->driver_lock, flags);
157 180
@@ -168,10 +191,12 @@ static void command_timer_fn(unsigned long data)
168 queue_work(lbtf_wq, &priv->cmd_work); 191 queue_work(lbtf_wq, &priv->cmd_work);
169out: 192out:
170 spin_unlock_irqrestore(&priv->driver_lock, flags); 193 spin_unlock_irqrestore(&priv->driver_lock, flags);
194 lbtf_deb_leave(LBTF_DEB_CMD);
171} 195}
172 196
173static int lbtf_init_adapter(struct lbtf_private *priv) 197static int lbtf_init_adapter(struct lbtf_private *priv)
174{ 198{
199 lbtf_deb_enter(LBTF_DEB_MAIN);
175 memset(priv->current_addr, 0xff, ETH_ALEN); 200 memset(priv->current_addr, 0xff, ETH_ALEN);
176 mutex_init(&priv->lock); 201 mutex_init(&priv->lock);
177 202
@@ -188,13 +213,16 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
188 if (lbtf_allocate_cmd_buffer(priv)) 213 if (lbtf_allocate_cmd_buffer(priv))
189 return -1; 214 return -1;
190 215
216 lbtf_deb_leave(LBTF_DEB_MAIN);
191 return 0; 217 return 0;
192} 218}
193 219
194static void lbtf_free_adapter(struct lbtf_private *priv) 220static void lbtf_free_adapter(struct lbtf_private *priv)
195{ 221{
222 lbtf_deb_enter(LBTF_DEB_MAIN);
196 lbtf_free_cmd_buffer(priv); 223 lbtf_free_cmd_buffer(priv);
197 del_timer(&priv->command_timer); 224 del_timer(&priv->command_timer);
225 lbtf_deb_leave(LBTF_DEB_MAIN);
198} 226}
199 227
200static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 228static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -221,14 +249,18 @@ static void lbtf_tx_work(struct work_struct *work)
221 struct sk_buff *skb = NULL; 249 struct sk_buff *skb = NULL;
222 int err; 250 int err;
223 251
252 lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
253
224 if ((priv->vif->type == NL80211_IFTYPE_AP) && 254 if ((priv->vif->type == NL80211_IFTYPE_AP) &&
225 (!skb_queue_empty(&priv->bc_ps_buf))) 255 (!skb_queue_empty(&priv->bc_ps_buf)))
226 skb = skb_dequeue(&priv->bc_ps_buf); 256 skb = skb_dequeue(&priv->bc_ps_buf);
227 else if (priv->skb_to_tx) { 257 else if (priv->skb_to_tx) {
228 skb = priv->skb_to_tx; 258 skb = priv->skb_to_tx;
229 priv->skb_to_tx = NULL; 259 priv->skb_to_tx = NULL;
230 } else 260 } else {
261 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
231 return; 262 return;
263 }
232 264
233 len = skb->len; 265 len = skb->len;
234 info = IEEE80211_SKB_CB(skb); 266 info = IEEE80211_SKB_CB(skb);
@@ -236,6 +268,7 @@ static void lbtf_tx_work(struct work_struct *work)
236 268
237 if (priv->surpriseremoved) { 269 if (priv->surpriseremoved) {
238 dev_kfree_skb_any(skb); 270 dev_kfree_skb_any(skb);
271 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
239 return; 272 return;
240 } 273 }
241 274
@@ -249,6 +282,7 @@ static void lbtf_tx_work(struct work_struct *work)
249 ETH_ALEN); 282 ETH_ALEN);
250 txpd->tx_packet_length = cpu_to_le16(len); 283 txpd->tx_packet_length = cpu_to_le16(len);
251 txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); 284 txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
285 lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
252 BUG_ON(priv->tx_skb); 286 BUG_ON(priv->tx_skb);
253 spin_lock_irq(&priv->driver_lock); 287 spin_lock_irq(&priv->driver_lock);
254 priv->tx_skb = skb; 288 priv->tx_skb = skb;
@@ -257,7 +291,9 @@ static void lbtf_tx_work(struct work_struct *work)
257 if (err) { 291 if (err) {
258 dev_kfree_skb_any(skb); 292 dev_kfree_skb_any(skb);
259 priv->tx_skb = NULL; 293 priv->tx_skb = NULL;
294 pr_err("TX error: %d", err);
260 } 295 }
296 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
261} 297}
262 298
263static int lbtf_op_start(struct ieee80211_hw *hw) 299static int lbtf_op_start(struct ieee80211_hw *hw)
@@ -266,6 +302,8 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
266 void *card = priv->card; 302 void *card = priv->card;
267 int ret = -1; 303 int ret = -1;
268 304
305 lbtf_deb_enter(LBTF_DEB_MACOPS);
306
269 if (!priv->fw_ready) 307 if (!priv->fw_ready)
270 /* Upload firmware */ 308 /* Upload firmware */
271 if (priv->hw_prog_firmware(card)) 309 if (priv->hw_prog_firmware(card))
@@ -286,10 +324,12 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
286 } 324 }
287 325
288 printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n"); 326 printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
327 lbtf_deb_leave(LBTF_DEB_MACOPS);
289 return 0; 328 return 0;
290 329
291err_prog_firmware: 330err_prog_firmware:
292 priv->hw_reset_device(card); 331 priv->hw_reset_device(card);
332 lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programing fw; ret=%d", ret);
293 return ret; 333 return ret;
294} 334}
295 335
@@ -300,6 +340,9 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
300 struct sk_buff *skb; 340 struct sk_buff *skb;
301 341
302 struct cmd_ctrl_node *cmdnode; 342 struct cmd_ctrl_node *cmdnode;
343
344 lbtf_deb_enter(LBTF_DEB_MACOPS);
345
303 /* Flush pending command nodes */ 346 /* Flush pending command nodes */
304 spin_lock_irqsave(&priv->driver_lock, flags); 347 spin_lock_irqsave(&priv->driver_lock, flags);
305 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 348 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
@@ -316,6 +359,7 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
316 priv->radioon = RADIO_OFF; 359 priv->radioon = RADIO_OFF;
317 lbtf_set_radio_control(priv); 360 lbtf_set_radio_control(priv);
318 361
362 lbtf_deb_leave(LBTF_DEB_MACOPS);
319 return; 363 return;
320} 364}
321 365
@@ -323,6 +367,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif) 367 struct ieee80211_vif *vif)
324{ 368{
325 struct lbtf_private *priv = hw->priv; 369 struct lbtf_private *priv = hw->priv;
370 lbtf_deb_enter(LBTF_DEB_MACOPS);
326 if (priv->vif != NULL) 371 if (priv->vif != NULL)
327 return -EOPNOTSUPP; 372 return -EOPNOTSUPP;
328 373
@@ -340,6 +385,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
340 return -EOPNOTSUPP; 385 return -EOPNOTSUPP;
341 } 386 }
342 lbtf_set_mac_address(priv, (u8 *) vif->addr); 387 lbtf_set_mac_address(priv, (u8 *) vif->addr);
388 lbtf_deb_leave(LBTF_DEB_MACOPS);
343 return 0; 389 return 0;
344} 390}
345 391
@@ -347,6 +393,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
347 struct ieee80211_vif *vif) 393 struct ieee80211_vif *vif)
348{ 394{
349 struct lbtf_private *priv = hw->priv; 395 struct lbtf_private *priv = hw->priv;
396 lbtf_deb_enter(LBTF_DEB_MACOPS);
350 397
351 if (priv->vif->type == NL80211_IFTYPE_AP || 398 if (priv->vif->type == NL80211_IFTYPE_AP ||
352 priv->vif->type == NL80211_IFTYPE_MESH_POINT) 399 priv->vif->type == NL80211_IFTYPE_MESH_POINT)
@@ -354,17 +401,20 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
354 lbtf_set_mode(priv, LBTF_PASSIVE_MODE); 401 lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
355 lbtf_set_bssid(priv, 0, NULL); 402 lbtf_set_bssid(priv, 0, NULL);
356 priv->vif = NULL; 403 priv->vif = NULL;
404 lbtf_deb_leave(LBTF_DEB_MACOPS);
357} 405}
358 406
359static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed) 407static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
360{ 408{
361 struct lbtf_private *priv = hw->priv; 409 struct lbtf_private *priv = hw->priv;
362 struct ieee80211_conf *conf = &hw->conf; 410 struct ieee80211_conf *conf = &hw->conf;
411 lbtf_deb_enter(LBTF_DEB_MACOPS);
363 412
364 if (conf->channel->center_freq != priv->cur_freq) { 413 if (conf->channel->center_freq != priv->cur_freq) {
365 priv->cur_freq = conf->channel->center_freq; 414 priv->cur_freq = conf->channel->center_freq;
366 lbtf_set_channel(priv, conf->channel->hw_value); 415 lbtf_set_channel(priv, conf->channel->hw_value);
367 } 416 }
417 lbtf_deb_leave(LBTF_DEB_MACOPS);
368 return 0; 418 return 0;
369} 419}
370 420
@@ -395,11 +445,16 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
395{ 445{
396 struct lbtf_private *priv = hw->priv; 446 struct lbtf_private *priv = hw->priv;
397 int old_mac_control = priv->mac_control; 447 int old_mac_control = priv->mac_control;
448
449 lbtf_deb_enter(LBTF_DEB_MACOPS);
450
398 changed_flags &= SUPPORTED_FIF_FLAGS; 451 changed_flags &= SUPPORTED_FIF_FLAGS;
399 *new_flags &= SUPPORTED_FIF_FLAGS; 452 *new_flags &= SUPPORTED_FIF_FLAGS;
400 453
401 if (!changed_flags) 454 if (!changed_flags) {
455 lbtf_deb_leave(LBTF_DEB_MACOPS);
402 return; 456 return;
457 }
403 458
404 if (*new_flags & (FIF_PROMISC_IN_BSS)) 459 if (*new_flags & (FIF_PROMISC_IN_BSS))
405 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE; 460 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
@@ -425,6 +480,8 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
425 480
426 if (priv->mac_control != old_mac_control) 481 if (priv->mac_control != old_mac_control)
427 lbtf_set_mac_control(priv); 482 lbtf_set_mac_control(priv);
483
484 lbtf_deb_leave(LBTF_DEB_MACOPS);
428} 485}
429 486
430static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw, 487static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -434,6 +491,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
434{ 491{
435 struct lbtf_private *priv = hw->priv; 492 struct lbtf_private *priv = hw->priv;
436 struct sk_buff *beacon; 493 struct sk_buff *beacon;
494 lbtf_deb_enter(LBTF_DEB_MACOPS);
437 495
438 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) { 496 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
439 switch (priv->vif->type) { 497 switch (priv->vif->type) {
@@ -464,6 +522,8 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
464 priv->preamble = CMD_TYPE_LONG_PREAMBLE; 522 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
465 lbtf_set_radio_control(priv); 523 lbtf_set_radio_control(priv);
466 } 524 }
525
526 lbtf_deb_leave(LBTF_DEB_MACOPS);
467} 527}
468 528
469static const struct ieee80211_ops lbtf_ops = { 529static const struct ieee80211_ops lbtf_ops = {
@@ -486,6 +546,8 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
486 unsigned int flags; 546 unsigned int flags;
487 struct ieee80211_hdr *hdr; 547 struct ieee80211_hdr *hdr;
488 548
549 lbtf_deb_enter(LBTF_DEB_RX);
550
489 prxpd = (struct rxpd *) skb->data; 551 prxpd = (struct rxpd *) skb->data;
490 552
491 stats.flag = 0; 553 stats.flag = 0;
@@ -494,7 +556,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
494 stats.freq = priv->cur_freq; 556 stats.freq = priv->cur_freq;
495 stats.band = IEEE80211_BAND_2GHZ; 557 stats.band = IEEE80211_BAND_2GHZ;
496 stats.signal = prxpd->snr; 558 stats.signal = prxpd->snr;
497 stats.noise = prxpd->nf;
498 /* Marvell rate index has a hole at value 4 */ 559 /* Marvell rate index has a hole at value 4 */
499 if (prxpd->rx_rate > 4) 560 if (prxpd->rx_rate > 4)
500 --prxpd->rx_rate; 561 --prxpd->rx_rate;
@@ -516,7 +577,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
516 } 577 }
517 578
518 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); 579 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
580
581 lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
582 skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
583 lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
584 min_t(unsigned int, skb->len, 100));
585
519 ieee80211_rx_irqsafe(priv->hw, skb); 586 ieee80211_rx_irqsafe(priv->hw, skb);
587
588 lbtf_deb_leave(LBTF_DEB_RX);
520 return 0; 589 return 0;
521} 590}
522EXPORT_SYMBOL_GPL(lbtf_rx); 591EXPORT_SYMBOL_GPL(lbtf_rx);
@@ -533,6 +602,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
533 struct ieee80211_hw *hw; 602 struct ieee80211_hw *hw;
534 struct lbtf_private *priv = NULL; 603 struct lbtf_private *priv = NULL;
535 604
605 lbtf_deb_enter(LBTF_DEB_MAIN);
606
536 hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops); 607 hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
537 if (!hw) 608 if (!hw)
538 goto done; 609 goto done;
@@ -575,6 +646,7 @@ err_init_adapter:
575 priv = NULL; 646 priv = NULL;
576 647
577done: 648done:
649 lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
578 return priv; 650 return priv;
579} 651}
580EXPORT_SYMBOL_GPL(lbtf_add_card); 652EXPORT_SYMBOL_GPL(lbtf_add_card);
@@ -584,6 +656,8 @@ int lbtf_remove_card(struct lbtf_private *priv)
584{ 656{
585 struct ieee80211_hw *hw = priv->hw; 657 struct ieee80211_hw *hw = priv->hw;
586 658
659 lbtf_deb_enter(LBTF_DEB_MAIN);
660
587 priv->surpriseremoved = 1; 661 priv->surpriseremoved = 1;
588 del_timer(&priv->command_timer); 662 del_timer(&priv->command_timer);
589 lbtf_free_adapter(priv); 663 lbtf_free_adapter(priv);
@@ -591,6 +665,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
591 ieee80211_unregister_hw(hw); 665 ieee80211_unregister_hw(hw);
592 ieee80211_free_hw(hw); 666 ieee80211_free_hw(hw);
593 667
668 lbtf_deb_leave(LBTF_DEB_MAIN);
594 return 0; 669 return 0;
595} 670}
596EXPORT_SYMBOL_GPL(lbtf_remove_card); 671EXPORT_SYMBOL_GPL(lbtf_remove_card);
@@ -649,17 +724,21 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
649 724
650static int __init lbtf_init_module(void) 725static int __init lbtf_init_module(void)
651{ 726{
727 lbtf_deb_enter(LBTF_DEB_MAIN);
652 lbtf_wq = create_workqueue("libertastf"); 728 lbtf_wq = create_workqueue("libertastf");
653 if (lbtf_wq == NULL) { 729 if (lbtf_wq == NULL) {
654 printk(KERN_ERR "libertastf: couldn't create workqueue\n"); 730 printk(KERN_ERR "libertastf: couldn't create workqueue\n");
655 return -ENOMEM; 731 return -ENOMEM;
656 } 732 }
733 lbtf_deb_leave(LBTF_DEB_MAIN);
657 return 0; 734 return 0;
658} 735}
659 736
660static void __exit lbtf_exit_module(void) 737static void __exit lbtf_exit_module(void)
661{ 738{
739 lbtf_deb_enter(LBTF_DEB_MAIN);
662 destroy_workqueue(lbtf_wq); 740 destroy_workqueue(lbtf_wq);
741 lbtf_deb_leave(LBTF_DEB_MAIN);
663} 742}
664 743
665module_init(lbtf_init_module); 744module_init(lbtf_init_module);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index dfff02f5c86d..9fd2beadb6f5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -830,6 +830,33 @@ static int mac80211_hwsim_conf_tx(
830 return 0; 830 return 0;
831} 831}
832 832
833static int mac80211_hwsim_get_survey(
834 struct ieee80211_hw *hw, int idx,
835 struct survey_info *survey)
836{
837 struct ieee80211_conf *conf = &hw->conf;
838
839 printk(KERN_DEBUG "%s:%s (idx=%d)\n",
840 wiphy_name(hw->wiphy), __func__, idx);
841
842 if (idx != 0)
843 return -ENOENT;
844
845 /* Current channel */
846 survey->channel = conf->channel;
847
848 /*
849 * Magically conjured noise level --- this is only ok for simulated hardware.
850 *
851 * A real driver which cannot determine the real channel noise MUST NOT
852 * report any noise, especially not a magically conjured one :-)
853 */
854 survey->filled = SURVEY_INFO_NOISE_DBM;
855 survey->noise = -92;
856
857 return 0;
858}
859
833#ifdef CONFIG_NL80211_TESTMODE 860#ifdef CONFIG_NL80211_TESTMODE
834/* 861/*
835 * This section contains example code for using netlink 862 * This section contains example code for using netlink
@@ -947,6 +974,7 @@ static void hw_scan_done(struct work_struct *work)
947} 974}
948 975
949static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, 976static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
977 struct ieee80211_vif *vif,
950 struct cfg80211_scan_request *req) 978 struct cfg80211_scan_request *req)
951{ 979{
952 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL); 980 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
@@ -993,7 +1021,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
993 mutex_lock(&hwsim->mutex); 1021 mutex_lock(&hwsim->mutex);
994 1022
995 printk(KERN_DEBUG "hwsim sw_scan_complete\n"); 1023 printk(KERN_DEBUG "hwsim sw_scan_complete\n");
996 hwsim->scanning = true; 1024 hwsim->scanning = false;
997 1025
998 mutex_unlock(&hwsim->mutex); 1026 mutex_unlock(&hwsim->mutex);
999} 1027}
@@ -1013,6 +1041,7 @@ static struct ieee80211_ops mac80211_hwsim_ops =
1013 .sta_notify = mac80211_hwsim_sta_notify, 1041 .sta_notify = mac80211_hwsim_sta_notify,
1014 .set_tim = mac80211_hwsim_set_tim, 1042 .set_tim = mac80211_hwsim_set_tim,
1015 .conf_tx = mac80211_hwsim_conf_tx, 1043 .conf_tx = mac80211_hwsim_conf_tx,
1044 .get_survey = mac80211_hwsim_get_survey,
1016 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 1045 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
1017 .ampdu_action = mac80211_hwsim_ampdu_action, 1046 .ampdu_action = mac80211_hwsim_ampdu_action,
1018 .sw_scan_start = mac80211_hwsim_sw_scan, 1047 .sw_scan_start = mac80211_hwsim_sw_scan,
@@ -1271,7 +1300,8 @@ static int __init init_mac80211_hwsim(void)
1271 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1300 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1272 IEEE80211_HW_SIGNAL_DBM | 1301 IEEE80211_HW_SIGNAL_DBM |
1273 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 1302 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1274 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; 1303 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
1304 IEEE80211_HW_AMPDU_AGGREGATION;
1275 1305
1276 /* ask mac80211 to reserve space for magic */ 1306 /* ask mac80211 to reserve space for magic */
1277 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1307 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 73bbd080c6e7..808adb909095 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -750,7 +750,6 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
750 memset(status, 0, sizeof(*status)); 750 memset(status, 0, sizeof(*status));
751 751
752 status->signal = -rxd->rssi; 752 status->signal = -rxd->rssi;
753 status->noise = -rxd->noise_floor;
754 753
755 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { 754 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
756 status->flag |= RX_FLAG_HT; 755 status->flag |= RX_FLAG_HT;
@@ -852,7 +851,6 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
852 memset(status, 0, sizeof(*status)); 851 memset(status, 0, sizeof(*status));
853 852
854 status->signal = -rxd->rssi; 853 status->signal = -rxd->rssi;
855 status->noise = -rxd->noise_level;
856 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); 854 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
857 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); 855 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
858 856
@@ -3984,8 +3982,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3984 3982
3985 hw->queues = MWL8K_TX_QUEUES; 3983 hw->queues = MWL8K_TX_QUEUES;
3986 3984
3987 /* Set rssi and noise values to dBm */ 3985 /* Set rssi values to dBm */
3988 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 3986 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
3989 hw->vif_data_size = sizeof(struct mwl8k_vif); 3987 hw->vif_data_size = sizeof(struct mwl8k_vif);
3990 hw->sta_data_size = sizeof(struct mwl8k_sta); 3988 hw->sta_data_size = sizeof(struct mwl8k_sta);
3991 3989
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 6116b546861d..60819bcf4377 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -132,3 +132,10 @@ config PCMCIA_SPECTRUM
132 This driver requires firmware download on startup. Utilities 132 This driver requires firmware download on startup. Utilities
133 for downloading Symbol firmware are available at 133 for downloading Symbol firmware are available at
134 <http://sourceforge.net/projects/orinoco/> 134 <http://sourceforge.net/projects/orinoco/>
135
136config ORINOCO_USB
137 tristate "Agere Orinoco USB support"
138 depends on USB && HERMES
139 select FW_LOADER
140 ---help---
141 This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 9abd6329bcbd..bfdefb85abcd 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -11,3 +11,7 @@ obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
11obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o 11obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
12obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o 12obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
13obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o 13obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
14obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
15
16# Orinoco should be endian clean.
17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index c60df2c1aca3..9bcee10c9308 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -77,9 +77,9 @@ airport_resume(struct macio_dev *mdev)
77 77
78 enable_irq(card->irq); 78 enable_irq(card->irq);
79 79
80 spin_lock_irqsave(&priv->lock, flags); 80 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
81 err = orinoco_up(priv); 81 err = orinoco_up(priv);
82 spin_unlock_irqrestore(&priv->lock, flags); 82 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
83 83
84 return err; 84 return err;
85} 85}
@@ -195,7 +195,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
195 ssleep(1); 195 ssleep(1);
196 196
197 /* Reset it before we get the interrupt */ 197 /* Reset it before we get the interrupt */
198 hermes_init(hw); 198 hw->ops->init(hw);
199 199
200 if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) { 200 if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
201 printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq); 201 printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
@@ -210,7 +210,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
210 } 210 }
211 211
212 /* Register an interface with the stack */ 212 /* Register an interface with the stack */
213 if (orinoco_if_add(priv, phys_addr, card->irq) != 0) { 213 if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
214 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 214 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
215 goto failed; 215 goto failed;
216 } 216 }
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 27f2d3342645..81d228de9e5d 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -88,7 +88,9 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
88 88
89 wiphy->rts_threshold = priv->rts_thresh; 89 wiphy->rts_threshold = priv->rts_thresh;
90 if (!priv->has_mwo) 90 if (!priv->has_mwo)
91 wiphy->frag_threshold = priv->frag_thresh; 91 wiphy->frag_threshold = priv->frag_thresh + 1;
92 wiphy->retry_short = priv->short_retry_limit;
93 wiphy->retry_long = priv->long_retry_limit;
92 94
93 return wiphy_register(wiphy); 95 return wiphy_register(wiphy);
94} 96}
@@ -187,7 +189,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
187 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 189 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
188 /* Fast channel change - no commit if successful */ 190 /* Fast channel change - no commit if successful */
189 hermes_t *hw = &priv->hw; 191 hermes_t *hw = &priv->hw;
190 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 192 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
191 HERMES_TEST_SET_CHANNEL, 193 HERMES_TEST_SET_CHANNEL,
192 channel, NULL); 194 channel, NULL);
193 } 195 }
@@ -196,8 +198,92 @@ static int orinoco_set_channel(struct wiphy *wiphy,
196 return err; 198 return err;
197} 199}
198 200
201static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
202{
203 struct orinoco_private *priv = wiphy_priv(wiphy);
204 int frag_value = -1;
205 int rts_value = -1;
206 int err = 0;
207
208 if (changed & WIPHY_PARAM_RETRY_SHORT) {
209 /* Setting short retry not supported */
210 err = -EINVAL;
211 }
212
213 if (changed & WIPHY_PARAM_RETRY_LONG) {
214 /* Setting long retry not supported */
215 err = -EINVAL;
216 }
217
218 if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
219 /* Set fragmentation */
220 if (priv->has_mwo) {
221 if (wiphy->frag_threshold < 0)
222 frag_value = 0;
223 else {
224 printk(KERN_WARNING "%s: Fixed fragmentation "
225 "is not supported on this firmware. "
226 "Using MWO robust instead.\n",
227 priv->ndev->name);
228 frag_value = 1;
229 }
230 } else {
231 if (wiphy->frag_threshold < 0)
232 frag_value = 2346;
233 else if ((wiphy->frag_threshold < 257) ||
234 (wiphy->frag_threshold > 2347))
235 err = -EINVAL;
236 else
237 /* cfg80211 value is 257-2347 (odd only)
238 * orinoco rid has range 256-2346 (even only) */
239 frag_value = wiphy->frag_threshold & ~0x1;
240 }
241 }
242
243 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
244 /* Set RTS.
245 *
246 * Prism documentation suggests default of 2432,
247 * and a range of 0-3000.
248 *
249 * Current implementation uses 2347 as the default and
250 * the upper limit.
251 */
252
253 if (wiphy->rts_threshold < 0)
254 rts_value = 2347;
255 else if (wiphy->rts_threshold > 2347)
256 err = -EINVAL;
257 else
258 rts_value = wiphy->rts_threshold;
259 }
260
261 if (!err) {
262 unsigned long flags;
263
264 if (orinoco_lock(priv, &flags) != 0)
265 return -EBUSY;
266
267 if (frag_value >= 0) {
268 if (priv->has_mwo)
269 priv->mwo_robust = frag_value;
270 else
271 priv->frag_thresh = frag_value;
272 }
273 if (rts_value >= 0)
274 priv->rts_thresh = rts_value;
275
276 err = orinoco_commit(priv);
277
278 orinoco_unlock(priv, &flags);
279 }
280
281 return err;
282}
283
199const struct cfg80211_ops orinoco_cfg_ops = { 284const struct cfg80211_ops orinoco_cfg_ops = {
200 .change_virtual_intf = orinoco_change_vif, 285 .change_virtual_intf = orinoco_change_vif,
201 .set_channel = orinoco_set_channel, 286 .set_channel = orinoco_set_channel,
202 .scan = orinoco_scan, 287 .scan = orinoco_scan,
288 .set_wiphy_params = orinoco_set_wiphy_params,
203}; 289};
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 5ea0f7cf85b1..3e1947d097ca 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -122,7 +122,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
122 dev_dbg(dev, "Attempting to download firmware %s\n", firmware); 122 dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
123 123
124 /* Read current plug data */ 124 /* Read current plug data */
125 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0); 125 err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
126 dev_dbg(dev, "Read PDA returned %d\n", err); 126 dev_dbg(dev, "Read PDA returned %d\n", err);
127 if (err) 127 if (err)
128 goto free; 128 goto free;
@@ -149,7 +149,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
149 } 149 }
150 150
151 /* Enable aux port to allow programming */ 151 /* Enable aux port to allow programming */
152 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point)); 152 err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
153 dev_dbg(dev, "Program init returned %d\n", err); 153 dev_dbg(dev, "Program init returned %d\n", err);
154 if (err != 0) 154 if (err != 0)
155 goto abort; 155 goto abort;
@@ -177,7 +177,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
177 goto abort; 177 goto abort;
178 178
179 /* Tell card we've finished */ 179 /* Tell card we've finished */
180 err = hermesi_program_end(hw); 180 err = hw->ops->program_end(hw);
181 dev_dbg(dev, "Program end returned %d\n", err); 181 dev_dbg(dev, "Program end returned %d\n", err);
182 if (err != 0) 182 if (err != 0)
183 goto abort; 183 goto abort;
@@ -224,7 +224,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
224 if (!pda) 224 if (!pda)
225 return -ENOMEM; 225 return -ENOMEM;
226 226
227 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1); 227 ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
228 if (ret) 228 if (ret)
229 goto free; 229 goto free;
230 } 230 }
@@ -260,7 +260,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
260 } 260 }
261 261
262 /* Reset hermes chip and make sure it responds */ 262 /* Reset hermes chip and make sure it responds */
263 ret = hermes_init(hw); 263 ret = hw->ops->init(hw);
264 264
265 /* hermes_reset() should return 0 with the secondary firmware */ 265 /* hermes_reset() should return 0 with the secondary firmware */
266 if (secondary && ret != 0) 266 if (secondary && ret != 0)
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 1a2fca76fd3c..6c6a23e08df6 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -52,6 +52,26 @@
52#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */ 52#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
53 53
54/* 54/*
55 * AUX port access. To unlock the AUX port write the access keys to the
56 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
57 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
58 */
59#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
60#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
61#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
62#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
63
64#define HERMES_AUX_PW0 0xFE01
65#define HERMES_AUX_PW1 0xDC23
66#define HERMES_AUX_PW2 0xBA45
67
68/* HERMES_CMD_DOWNLD */
69#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
70#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
71#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
72#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
73
74/*
55 * Debugging helpers 75 * Debugging helpers
56 */ 76 */
57 77
@@ -70,6 +90,7 @@
70 90
71#endif /* ! HERMES_DEBUG */ 91#endif /* ! HERMES_DEBUG */
72 92
93static const struct hermes_ops hermes_ops_local;
73 94
74/* 95/*
75 * Internal functions 96 * Internal functions
@@ -111,9 +132,9 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
111 */ 132 */
112 133
113/* For doing cmds that wipe the magic constant in SWSUPPORT0 */ 134/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
114int hermes_doicmd_wait(hermes_t *hw, u16 cmd, 135static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
115 u16 parm0, u16 parm1, u16 parm2, 136 u16 parm0, u16 parm1, u16 parm2,
116 struct hermes_response *resp) 137 struct hermes_response *resp)
117{ 138{
118 int err = 0; 139 int err = 0;
119 int k; 140 int k;
@@ -163,17 +184,18 @@ int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
163out: 184out:
164 return err; 185 return err;
165} 186}
166EXPORT_SYMBOL(hermes_doicmd_wait);
167 187
168void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing) 188void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
169{ 189{
170 hw->iobase = address; 190 hw->iobase = address;
171 hw->reg_spacing = reg_spacing; 191 hw->reg_spacing = reg_spacing;
172 hw->inten = 0x0; 192 hw->inten = 0x0;
193 hw->eeprom_pda = false;
194 hw->ops = &hermes_ops_local;
173} 195}
174EXPORT_SYMBOL(hermes_struct_init); 196EXPORT_SYMBOL(hermes_struct_init);
175 197
176int hermes_init(hermes_t *hw) 198static int hermes_init(hermes_t *hw)
177{ 199{
178 u16 reg; 200 u16 reg;
179 int err = 0; 201 int err = 0;
@@ -217,7 +239,6 @@ int hermes_init(hermes_t *hw)
217 239
218 return err; 240 return err;
219} 241}
220EXPORT_SYMBOL(hermes_init);
221 242
222/* Issue a command to the chip, and (busy!) wait for it to 243/* Issue a command to the chip, and (busy!) wait for it to
223 * complete. 244 * complete.
@@ -228,8 +249,8 @@ EXPORT_SYMBOL(hermes_init);
228 * > 0 on error returned by the firmware 249 * > 0 on error returned by the firmware
229 * 250 *
230 * Callable from any context, but locking is your problem. */ 251 * Callable from any context, but locking is your problem. */
231int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, 252static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
232 struct hermes_response *resp) 253 struct hermes_response *resp)
233{ 254{
234 int err; 255 int err;
235 int k; 256 int k;
@@ -291,9 +312,8 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
291 out: 312 out:
292 return err; 313 return err;
293} 314}
294EXPORT_SYMBOL(hermes_docmd_wait);
295 315
296int hermes_allocate(hermes_t *hw, u16 size, u16 *fid) 316static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
297{ 317{
298 int err = 0; 318 int err = 0;
299 int k; 319 int k;
@@ -333,7 +353,6 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
333 353
334 return 0; 354 return 0;
335} 355}
336EXPORT_SYMBOL(hermes_allocate);
337 356
338/* Set up a BAP to read a particular chunk of data from card's internal buffer. 357/* Set up a BAP to read a particular chunk of data from card's internal buffer.
339 * 358 *
@@ -403,8 +422,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
403 * 0 on success 422 * 0 on success
404 * > 0 on error from firmware 423 * > 0 on error from firmware
405 */ 424 */
406int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, 425static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
407 u16 id, u16 offset) 426 u16 id, u16 offset)
408{ 427{
409 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 428 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
410 int err = 0; 429 int err = 0;
@@ -422,7 +441,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
422 out: 441 out:
423 return err; 442 return err;
424} 443}
425EXPORT_SYMBOL(hermes_bap_pread);
426 444
427/* Write a block of data to the chip's buffer, via the 445/* Write a block of data to the chip's buffer, via the
428 * BAP. Synchronization/serialization is the caller's problem. 446 * BAP. Synchronization/serialization is the caller's problem.
@@ -432,8 +450,8 @@ EXPORT_SYMBOL(hermes_bap_pread);
432 * 0 on success 450 * 0 on success
433 * > 0 on error from firmware 451 * > 0 on error from firmware
434 */ 452 */
435int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, 453static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
436 u16 id, u16 offset) 454 u16 id, u16 offset)
437{ 455{
438 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 456 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
439 int err = 0; 457 int err = 0;
@@ -451,7 +469,6 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
451 out: 469 out:
452 return err; 470 return err;
453} 471}
454EXPORT_SYMBOL(hermes_bap_pwrite);
455 472
456/* Read a Length-Type-Value record from the card. 473/* Read a Length-Type-Value record from the card.
457 * 474 *
@@ -461,8 +478,8 @@ EXPORT_SYMBOL(hermes_bap_pwrite);
461 * practice. 478 * practice.
462 * 479 *
463 * Callable from user or bh context. */ 480 * Callable from user or bh context. */
464int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize, 481static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
465 u16 *length, void *buf) 482 u16 *length, void *buf)
466{ 483{
467 int err = 0; 484 int err = 0;
468 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 485 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -505,10 +522,9 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
505 522
506 return 0; 523 return 0;
507} 524}
508EXPORT_SYMBOL(hermes_read_ltv);
509 525
510int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 526static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
511 u16 length, const void *value) 527 u16 length, const void *value)
512{ 528{
513 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 529 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
514 int err = 0; 530 int err = 0;
@@ -533,4 +549,228 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
533 549
534 return err; 550 return err;
535} 551}
536EXPORT_SYMBOL(hermes_write_ltv); 552
553/*** Hermes AUX control ***/
554
555static inline void
556hermes_aux_setaddr(hermes_t *hw, u32 addr)
557{
558 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
559 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
560}
561
562static inline int
563hermes_aux_control(hermes_t *hw, int enabled)
564{
565 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
566 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
567 int i;
568
569 /* Already open? */
570 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
571 return 0;
572
573 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
574 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
575 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
576 hermes_write_reg(hw, HERMES_CONTROL, action);
577
578 for (i = 0; i < 20; i++) {
579 udelay(10);
580 if (hermes_read_reg(hw, HERMES_CONTROL) ==
581 desired_state)
582 return 0;
583 }
584
585 return -EBUSY;
586}
587
588/*** Hermes programming ***/
589
590/* About to start programming data (Hermes I)
591 * offset is the entry point
592 *
593 * Spectrum_cs' Symbol fw does not require this
594 * wl_lkm Agere fw does
595 * Don't know about intersil
596 */
597static int hermesi_program_init(hermes_t *hw, u32 offset)
598{
599 int err;
600
601 /* Disable interrupts?*/
602 /*hw->inten = 0x0;*/
603 /*hermes_write_regn(hw, INTEN, 0);*/
604 /*hermes_set_irqmask(hw, 0);*/
605
606 /* Acknowledge any outstanding command */
607 hermes_write_regn(hw, EVACK, 0xFFFF);
608
609 /* Using init_cmd_wait rather than cmd_wait */
610 err = hw->ops->init_cmd_wait(hw,
611 0x0100 | HERMES_CMD_INIT,
612 0, 0, 0, NULL);
613 if (err)
614 return err;
615
616 err = hw->ops->init_cmd_wait(hw,
617 0x0000 | HERMES_CMD_INIT,
618 0, 0, 0, NULL);
619 if (err)
620 return err;
621
622 err = hermes_aux_control(hw, 1);
623 pr_debug("AUX enable returned %d\n", err);
624
625 if (err)
626 return err;
627
628 pr_debug("Enabling volatile, EP 0x%08x\n", offset);
629 err = hw->ops->init_cmd_wait(hw,
630 HERMES_PROGRAM_ENABLE_VOLATILE,
631 offset & 0xFFFFu,
632 offset >> 16,
633 0,
634 NULL);
635 pr_debug("PROGRAM_ENABLE returned %d\n", err);
636
637 return err;
638}
639
640/* Done programming data (Hermes I)
641 *
642 * Spectrum_cs' Symbol fw does not require this
643 * wl_lkm Agere fw does
644 * Don't know about intersil
645 */
646static int hermesi_program_end(hermes_t *hw)
647{
648 struct hermes_response resp;
649 int rc = 0;
650 int err;
651
652 rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
653
654 pr_debug("PROGRAM_DISABLE returned %d, "
655 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
656 rc, resp.resp0, resp.resp1, resp.resp2);
657
658 if ((rc == 0) &&
659 ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
660 rc = -EIO;
661
662 err = hermes_aux_control(hw, 0);
663 pr_debug("AUX disable returned %d\n", err);
664
665 /* Acknowledge any outstanding command */
666 hermes_write_regn(hw, EVACK, 0xFFFF);
667
668 /* Reinitialise, ignoring return */
669 (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
670 0, 0, 0, NULL);
671
672 return rc ? rc : err;
673}
674
675static int hermes_program_bytes(struct hermes *hw, const char *data,
676 u32 addr, u32 len)
677{
678 /* wl lkm splits the programming into chunks of 2000 bytes.
679 * This restriction appears to come from USB. The PCMCIA
680 * adapters can program the whole lot in one go */
681 hermes_aux_setaddr(hw, addr);
682 hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
683 return 0;
684}
685
686/* Read PDA from the adapter */
687static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
688{
689 int ret;
690 u16 pda_size;
691 u16 data_len = pda_len;
692 __le16 *data = pda;
693
694 if (hw->eeprom_pda) {
695 /* PDA of spectrum symbol is in eeprom */
696
697 /* Issue command to read EEPROM */
698 ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
699 if (ret)
700 return ret;
701 } else {
702 /* wl_lkm does not include PDA size in the PDA area.
703 * We will pad the information into pda, so other routines
704 * don't have to be modified */
705 pda[0] = cpu_to_le16(pda_len - 2);
706 /* Includes CFG_PROD_DATA but not itself */
707 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
708 data_len = pda_len - 4;
709 data = pda + 2;
710 }
711
712 /* Open auxiliary port */
713 ret = hermes_aux_control(hw, 1);
714 pr_debug("AUX enable returned %d\n", ret);
715 if (ret)
716 return ret;
717
718 /* Read PDA */
719 hermes_aux_setaddr(hw, pda_addr);
720 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
721
722 /* Close aux port */
723 ret = hermes_aux_control(hw, 0);
724 pr_debug("AUX disable returned %d\n", ret);
725
726 /* Check PDA length */
727 pda_size = le16_to_cpu(pda[0]);
728 pr_debug("Actual PDA length %d, Max allowed %d\n",
729 pda_size, pda_len);
730 if (pda_size > pda_len)
731 return -EINVAL;
732
733 return 0;
734}
735
736static void hermes_lock_irqsave(spinlock_t *lock,
737 unsigned long *flags) __acquires(lock)
738{
739 spin_lock_irqsave(lock, *flags);
740}
741
742static void hermes_unlock_irqrestore(spinlock_t *lock,
743 unsigned long *flags) __releases(lock)
744{
745 spin_unlock_irqrestore(lock, *flags);
746}
747
748static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
749{
750 spin_lock_irq(lock);
751}
752
753static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
754{
755 spin_unlock_irq(lock);
756}
757
758/* Hermes operations for local buses */
759static const struct hermes_ops hermes_ops_local = {
760 .init = hermes_init,
761 .cmd_wait = hermes_docmd_wait,
762 .init_cmd_wait = hermes_doicmd_wait,
763 .allocate = hermes_allocate,
764 .read_ltv = hermes_read_ltv,
765 .write_ltv = hermes_write_ltv,
766 .bap_pread = hermes_bap_pread,
767 .bap_pwrite = hermes_bap_pwrite,
768 .read_pda = hermes_read_pda,
769 .program_init = hermesi_program_init,
770 .program_end = hermesi_program_end,
771 .program = hermes_program_bytes,
772 .lock_irqsave = hermes_lock_irqsave,
773 .unlock_irqrestore = hermes_unlock_irqrestore,
774 .lock_irq = hermes_lock_irq,
775 .unlock_irq = hermes_unlock_irq,
776};
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 2dddbb597c4d..9ca34e722b45 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -374,6 +374,37 @@ struct hermes_multicast {
374/* Timeouts */ 374/* Timeouts */
375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ 375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
376 376
377struct hermes;
378
379/* Functions to access hardware */
380struct hermes_ops {
381 int (*init)(struct hermes *hw);
382 int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
383 struct hermes_response *resp);
384 int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
385 u16 parm0, u16 parm1, u16 parm2,
386 struct hermes_response *resp);
387 int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
388 int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
389 u16 *length, void *buf);
390 int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
391 u16 length, const void *value);
392 int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
393 u16 id, u16 offset);
394 int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
395 int len, u16 id, u16 offset);
396 int (*read_pda)(struct hermes *hw, __le16 *pda,
397 u32 pda_addr, u16 pda_len);
398 int (*program_init)(struct hermes *hw, u32 entry_point);
399 int (*program_end)(struct hermes *hw);
400 int (*program)(struct hermes *hw, const char *buf,
401 u32 addr, u32 len);
402 void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
403 void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
404 void (*lock_irq)(spinlock_t *lock);
405 void (*unlock_irq)(spinlock_t *lock);
406};
407
377/* Basic control structure */ 408/* Basic control structure */
378typedef struct hermes { 409typedef struct hermes {
379 void __iomem *iobase; 410 void __iomem *iobase;
@@ -381,6 +412,9 @@ typedef struct hermes {
381#define HERMES_16BIT_REGSPACING 0 412#define HERMES_16BIT_REGSPACING 0
382#define HERMES_32BIT_REGSPACING 1 413#define HERMES_32BIT_REGSPACING 1
383 u16 inten; /* Which interrupts should be enabled? */ 414 u16 inten; /* Which interrupts should be enabled? */
415 bool eeprom_pda;
416 const struct hermes_ops *ops;
417 void *priv;
384} hermes_t; 418} hermes_t;
385 419
386/* Register access convenience macros */ 420/* Register access convenience macros */
@@ -394,22 +428,6 @@ typedef struct hermes {
394 428
395/* Function prototypes */ 429/* Function prototypes */
396void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing); 430void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
397int hermes_init(hermes_t *hw);
398int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
399 struct hermes_response *resp);
400int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
401 u16 parm0, u16 parm1, u16 parm2,
402 struct hermes_response *resp);
403int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
404
405int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
406 u16 id, u16 offset);
407int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
408 u16 id, u16 offset);
409int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
410 u16 *length, void *buf);
411int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
412 u16 length, const void *value);
413 431
414/* Inline functions */ 432/* Inline functions */
415 433
@@ -426,13 +444,13 @@ static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
426 444
427static inline int hermes_enable_port(hermes_t *hw, int port) 445static inline int hermes_enable_port(hermes_t *hw, int port)
428{ 446{
429 return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8), 447 return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
430 0, NULL); 448 0, NULL);
431} 449}
432 450
433static inline int hermes_disable_port(hermes_t *hw, int port) 451static inline int hermes_disable_port(hermes_t *hw, int port)
434{ 452{
435 return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8), 453 return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
436 0, NULL); 454 0, NULL);
437} 455}
438 456
@@ -440,7 +458,7 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
440 * information frame in __orinoco_ev_info() */ 458 * information frame in __orinoco_ev_info() */
441static inline int hermes_inquire(hermes_t *hw, u16 rid) 459static inline int hermes_inquire(hermes_t *hw, u16 rid)
442{ 460{
443 return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL); 461 return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
444} 462}
445 463
446#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1) 464#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
@@ -475,10 +493,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
475} 493}
476 494
477#define HERMES_READ_RECORD(hw, bap, rid, buf) \ 495#define HERMES_READ_RECORD(hw, bap, rid, buf) \
478 (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf))) 496 (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
479#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \ 497#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
480 (hermes_write_ltv((hw), (bap), (rid), \ 498 (hw->ops->write_ltv((hw), (bap), (rid), \
481 HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf))) 499 HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
482 500
483static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) 501static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
484{ 502{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index fb157eb889ca..6da85e75fce0 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -46,37 +46,11 @@
46 46
47#define PFX "hermes_dld: " 47#define PFX "hermes_dld: "
48 48
49/*
50 * AUX port access. To unlock the AUX port write the access keys to the
51 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
52 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
53 */
54#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
55#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
56#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
57#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
58
59#define HERMES_AUX_PW0 0xFE01
60#define HERMES_AUX_PW1 0xDC23
61#define HERMES_AUX_PW2 0xBA45
62
63/* HERMES_CMD_DOWNLD */
64#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
65#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
66#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
67#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
68
69/* End markers used in dblocks */ 49/* End markers used in dblocks */
70#define PDI_END 0x00000000 /* End of PDA */ 50#define PDI_END 0x00000000 /* End of PDA */
71#define BLOCK_END 0xFFFFFFFF /* Last image block */ 51#define BLOCK_END 0xFFFFFFFF /* Last image block */
72#define TEXT_END 0x1A /* End of text header */ 52#define TEXT_END 0x1A /* End of text header */
73 53
74/* Limit the amout we try to download in a single shot.
75 * Size is in bytes.
76 */
77#define MAX_DL_SIZE 1024
78#define LIMIT_PROGRAM_SIZE 0
79
80/* 54/*
81 * The following structures have little-endian fields denoted by 55 * The following structures have little-endian fields denoted by
82 * the leading underscore. Don't access them directly - use inline 56 * the leading underscore. Don't access them directly - use inline
@@ -165,41 +139,6 @@ pdi_len(const struct pdi *pdi)
165 return 2 * (le16_to_cpu(pdi->len) - 1); 139 return 2 * (le16_to_cpu(pdi->len) - 1);
166} 140}
167 141
168/*** Hermes AUX control ***/
169
170static inline void
171hermes_aux_setaddr(hermes_t *hw, u32 addr)
172{
173 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
174 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
175}
176
177static inline int
178hermes_aux_control(hermes_t *hw, int enabled)
179{
180 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
181 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
182 int i;
183
184 /* Already open? */
185 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
186 return 0;
187
188 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
189 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
190 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
191 hermes_write_reg(hw, HERMES_CONTROL, action);
192
193 for (i = 0; i < 20; i++) {
194 udelay(10);
195 if (hermes_read_reg(hw, HERMES_CONTROL) ==
196 desired_state)
197 return 0;
198 }
199
200 return -EBUSY;
201}
202
203/*** Plug Data Functions ***/ 142/*** Plug Data Functions ***/
204 143
205/* 144/*
@@ -271,62 +210,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
271 return -EINVAL; 210 return -EINVAL;
272 211
273 /* do the actual plugging */ 212 /* do the actual plugging */
274 hermes_aux_setaddr(hw, pdr_addr(pdr)); 213 hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
275 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
276
277 return 0;
278}
279
280/* Read PDA from the adapter */
281int hermes_read_pda(hermes_t *hw,
282 __le16 *pda,
283 u32 pda_addr,
284 u16 pda_len,
285 int use_eeprom) /* can we get this into hw? */
286{
287 int ret;
288 u16 pda_size;
289 u16 data_len = pda_len;
290 __le16 *data = pda;
291
292 if (use_eeprom) {
293 /* PDA of spectrum symbol is in eeprom */
294
295 /* Issue command to read EEPROM */
296 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
297 if (ret)
298 return ret;
299 } else {
300 /* wl_lkm does not include PDA size in the PDA area.
301 * We will pad the information into pda, so other routines
302 * don't have to be modified */
303 pda[0] = cpu_to_le16(pda_len - 2);
304 /* Includes CFG_PROD_DATA but not itself */
305 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
306 data_len = pda_len - 4;
307 data = pda + 2;
308 }
309
310 /* Open auxiliary port */
311 ret = hermes_aux_control(hw, 1);
312 pr_debug(PFX "AUX enable returned %d\n", ret);
313 if (ret)
314 return ret;
315
316 /* read PDA from EEPROM */
317 hermes_aux_setaddr(hw, pda_addr);
318 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
319
320 /* Close aux port */
321 ret = hermes_aux_control(hw, 0);
322 pr_debug(PFX "AUX disable returned %d\n", ret);
323
324 /* Check PDA length */
325 pda_size = le16_to_cpu(pda[0]);
326 pr_debug(PFX "Actual PDA length %d, Max allowed %d\n",
327 pda_size, pda_len);
328 if (pda_size > pda_len)
329 return -EINVAL;
330 214
331 return 0; 215 return 0;
332} 216}
@@ -389,101 +273,13 @@ hermes_blocks_length(const char *first_block, const void *end)
389 273
390/*** Hermes programming ***/ 274/*** Hermes programming ***/
391 275
392/* About to start programming data (Hermes I)
393 * offset is the entry point
394 *
395 * Spectrum_cs' Symbol fw does not require this
396 * wl_lkm Agere fw does
397 * Don't know about intersil
398 */
399int hermesi_program_init(hermes_t *hw, u32 offset)
400{
401 int err;
402
403 /* Disable interrupts?*/
404 /*hw->inten = 0x0;*/
405 /*hermes_write_regn(hw, INTEN, 0);*/
406 /*hermes_set_irqmask(hw, 0);*/
407
408 /* Acknowledge any outstanding command */
409 hermes_write_regn(hw, EVACK, 0xFFFF);
410
411 /* Using doicmd_wait rather than docmd_wait */
412 err = hermes_doicmd_wait(hw,
413 0x0100 | HERMES_CMD_INIT,
414 0, 0, 0, NULL);
415 if (err)
416 return err;
417
418 err = hermes_doicmd_wait(hw,
419 0x0000 | HERMES_CMD_INIT,
420 0, 0, 0, NULL);
421 if (err)
422 return err;
423
424 err = hermes_aux_control(hw, 1);
425 pr_debug(PFX "AUX enable returned %d\n", err);
426
427 if (err)
428 return err;
429
430 pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
431 err = hermes_doicmd_wait(hw,
432 HERMES_PROGRAM_ENABLE_VOLATILE,
433 offset & 0xFFFFu,
434 offset >> 16,
435 0,
436 NULL);
437 pr_debug(PFX "PROGRAM_ENABLE returned %d\n", err);
438
439 return err;
440}
441
442/* Done programming data (Hermes I)
443 *
444 * Spectrum_cs' Symbol fw does not require this
445 * wl_lkm Agere fw does
446 * Don't know about intersil
447 */
448int hermesi_program_end(hermes_t *hw)
449{
450 struct hermes_response resp;
451 int rc = 0;
452 int err;
453
454 rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
455
456 pr_debug(PFX "PROGRAM_DISABLE returned %d, "
457 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
458 rc, resp.resp0, resp.resp1, resp.resp2);
459
460 if ((rc == 0) &&
461 ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
462 rc = -EIO;
463
464 err = hermes_aux_control(hw, 0);
465 pr_debug(PFX "AUX disable returned %d\n", err);
466
467 /* Acknowledge any outstanding command */
468 hermes_write_regn(hw, EVACK, 0xFFFF);
469
470 /* Reinitialise, ignoring return */
471 (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
472 0, 0, 0, NULL);
473
474 return rc ? rc : err;
475}
476
477/* Program the data blocks */ 276/* Program the data blocks */
478int hermes_program(hermes_t *hw, const char *first_block, const void *end) 277int hermes_program(hermes_t *hw, const char *first_block, const void *end)
479{ 278{
480 const struct dblock *blk; 279 const struct dblock *blk;
481 u32 blkaddr; 280 u32 blkaddr;
482 u32 blklen; 281 u32 blklen;
483#if LIMIT_PROGRAM_SIZE 282 int err = 0;
484 u32 addr;
485 u32 len;
486#endif
487 283
488 blk = (const struct dblock *) first_block; 284 blk = (const struct dblock *) first_block;
489 285
@@ -498,30 +294,10 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
498 pr_debug(PFX "Programming block of length %d " 294 pr_debug(PFX "Programming block of length %d "
499 "to address 0x%08x\n", blklen, blkaddr); 295 "to address 0x%08x\n", blklen, blkaddr);
500 296
501#if !LIMIT_PROGRAM_SIZE 297 err = hw->ops->program(hw, blk->data, blkaddr, blklen);
502 /* wl_lkm driver splits this into writes of 2000 bytes */ 298 if (err)
503 hermes_aux_setaddr(hw, blkaddr); 299 break;
504 hermes_write_bytes(hw, HERMES_AUXDATA, blk->data, 300
505 blklen);
506#else
507 len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
508 addr = blkaddr;
509
510 while (addr < (blkaddr + blklen)) {
511 pr_debug(PFX "Programming subblock of length %d "
512 "to address 0x%08x. Data @ %p\n",
513 len, addr, &blk->data[addr - blkaddr]);
514
515 hermes_aux_setaddr(hw, addr);
516 hermes_write_bytes(hw, HERMES_AUXDATA,
517 &blk->data[addr - blkaddr],
518 len);
519
520 addr += len;
521 len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
522 (blkaddr + blklen - addr) : MAX_DL_SIZE;
523 }
524#endif
525 blk = (const struct dblock *) &blk->data[blklen]; 301 blk = (const struct dblock *) &blk->data[blklen];
526 302
527 if ((void *) blk > (end - sizeof(*blk))) 303 if ((void *) blk > (end - sizeof(*blk)))
@@ -530,7 +306,7 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
530 blkaddr = dblock_addr(blk); 306 blkaddr = dblock_addr(blk);
531 blklen = dblock_len(blk); 307 blklen = dblock_len(blk);
532 } 308 }
533 return 0; 309 return err;
534} 310}
535 311
536/*** Default plugging data for Hermes I ***/ 312/*** Default plugging data for Hermes I ***/
@@ -690,9 +466,8 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
690 if ((pdi_len(pdi) == pdr_len(pdr)) && 466 if ((pdi_len(pdi) == pdr_len(pdr)) &&
691 ((void *) pdi->data + pdi_len(pdi) < pda_end)) { 467 ((void *) pdi->data + pdi_len(pdi) < pda_end)) {
692 /* do the actual plugging */ 468 /* do the actual plugging */
693 hermes_aux_setaddr(hw, pdr_addr(pdr)); 469 hw->ops->program(hw, pdi->data, pdr_addr(pdr),
694 hermes_write_bytes(hw, HERMES_AUXDATA, 470 pdi_len(pdi));
695 pdi->data, pdi_len(pdi));
696 } 471 }
697 } 472 }
698 473
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 9f657afaa3e5..6fbd78850123 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -177,9 +177,9 @@ int determine_fw_capabilities(struct orinoco_private *priv,
177 /* 3Com MAC : 00:50:DA:* */ 177 /* 3Com MAC : 00:50:DA:* */
178 memset(tmp, 0, sizeof(tmp)); 178 memset(tmp, 0, sizeof(tmp));
179 /* Get the Symbol firmware version */ 179 /* Get the Symbol firmware version */
180 err = hermes_read_ltv(hw, USER_BAP, 180 err = hw->ops->read_ltv(hw, USER_BAP,
181 HERMES_RID_SECONDARYVERSION_SYMBOL, 181 HERMES_RID_SECONDARYVERSION_SYMBOL,
182 SYMBOL_MAX_VER_LEN, NULL, &tmp); 182 SYMBOL_MAX_VER_LEN, NULL, &tmp);
183 if (err) { 183 if (err) {
184 dev_warn(dev, "Error %d reading Symbol firmware info. " 184 dev_warn(dev, "Error %d reading Symbol firmware info. "
185 "Wildly guessing capabilities...\n", err); 185 "Wildly guessing capabilities...\n", err);
@@ -286,8 +286,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
286 u16 reclen; 286 u16 reclen;
287 287
288 /* Get the MAC address */ 288 /* Get the MAC address */
289 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 289 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
290 ETH_ALEN, NULL, dev_addr); 290 ETH_ALEN, NULL, dev_addr);
291 if (err) { 291 if (err) {
292 dev_warn(dev, "Failed to read MAC address!\n"); 292 dev_warn(dev, "Failed to read MAC address!\n");
293 goto out; 293 goto out;
@@ -296,8 +296,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
296 dev_dbg(dev, "MAC address %pM\n", dev_addr); 296 dev_dbg(dev, "MAC address %pM\n", dev_addr);
297 297
298 /* Get the station name */ 298 /* Get the station name */
299 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, 299 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
300 sizeof(nickbuf), &reclen, &nickbuf); 300 sizeof(nickbuf), &reclen, &nickbuf);
301 if (err) { 301 if (err) {
302 dev_err(dev, "failed to read station name\n"); 302 dev_err(dev, "failed to read station name\n");
303 goto out; 303 goto out;
@@ -374,6 +374,32 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
374 err = hermes_read_wordrec(hw, USER_BAP, 374 err = hermes_read_wordrec(hw, USER_BAP,
375 HERMES_RID_CNFPREAMBLE_SYMBOL, 375 HERMES_RID_CNFPREAMBLE_SYMBOL,
376 &priv->preamble); 376 &priv->preamble);
377 if (err) {
378 dev_err(dev, "Failed to read preamble setup\n");
379 goto out;
380 }
381 }
382
383 /* Retry settings */
384 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
385 &priv->short_retry_limit);
386 if (err) {
387 dev_err(dev, "Failed to read short retry limit\n");
388 goto out;
389 }
390
391 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
392 &priv->long_retry_limit);
393 if (err) {
394 dev_err(dev, "Failed to read long retry limit\n");
395 goto out;
396 }
397
398 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
399 &priv->retry_lifetime);
400 if (err) {
401 dev_err(dev, "Failed to read max retry lifetime\n");
402 goto out;
377 } 403 }
378 404
379out: 405out:
@@ -387,11 +413,11 @@ int orinoco_hw_allocate_fid(struct orinoco_private *priv)
387 struct hermes *hw = &priv->hw; 413 struct hermes *hw = &priv->hw;
388 int err; 414 int err;
389 415
390 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); 416 err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
391 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { 417 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
392 /* Try workaround for old Symbol firmware bug */ 418 /* Try workaround for old Symbol firmware bug */
393 priv->nicbuf_size = TX_NICBUF_SIZE_BUG; 419 priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
394 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); 420 err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
395 421
396 dev_warn(dev, "Firmware ALLOC bug detected " 422 dev_warn(dev, "Firmware ALLOC bug detected "
397 "(old Symbol firmware?). Work around %s\n", 423 "(old Symbol firmware?). Work around %s\n",
@@ -437,8 +463,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
437 struct hermes_idstring idbuf; 463 struct hermes_idstring idbuf;
438 464
439 /* Set the MAC address */ 465 /* Set the MAC address */
440 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 466 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
441 HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr); 467 HERMES_BYTES_TO_RECLEN(ETH_ALEN),
468 dev->dev_addr);
442 if (err) { 469 if (err) {
443 printk(KERN_ERR "%s: Error %d setting MAC address\n", 470 printk(KERN_ERR "%s: Error %d setting MAC address\n",
444 dev->name, err); 471 dev->name, err);
@@ -501,7 +528,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
501 idbuf.len = cpu_to_le16(strlen(priv->desired_essid)); 528 idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
502 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val)); 529 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
503 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */ 530 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
504 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID, 531 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
505 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2), 532 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
506 &idbuf); 533 &idbuf);
507 if (err) { 534 if (err) {
@@ -509,7 +536,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
509 dev->name, err); 536 dev->name, err);
510 return err; 537 return err;
511 } 538 }
512 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID, 539 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
513 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2), 540 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
514 &idbuf); 541 &idbuf);
515 if (err) { 542 if (err) {
@@ -521,9 +548,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
521 /* Set the station name */ 548 /* Set the station name */
522 idbuf.len = cpu_to_le16(strlen(priv->nick)); 549 idbuf.len = cpu_to_le16(strlen(priv->nick));
523 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val)); 550 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
524 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, 551 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
525 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2), 552 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
526 &idbuf); 553 &idbuf);
527 if (err) { 554 if (err) {
528 printk(KERN_ERR "%s: Error %d setting nickname\n", 555 printk(KERN_ERR "%s: Error %d setting nickname\n",
529 dev->name, err); 556 dev->name, err);
@@ -638,12 +665,12 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
638 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 665 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
639 /* Enable monitor mode */ 666 /* Enable monitor mode */
640 dev->type = ARPHRD_IEEE80211; 667 dev->type = ARPHRD_IEEE80211;
641 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 668 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
642 HERMES_TEST_MONITOR, 0, NULL); 669 HERMES_TEST_MONITOR, 0, NULL);
643 } else { 670 } else {
644 /* Disable monitor mode */ 671 /* Disable monitor mode */
645 dev->type = ARPHRD_ETHER; 672 dev->type = ARPHRD_ETHER;
646 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 673 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
647 HERMES_TEST_STOP, 0, NULL); 674 HERMES_TEST_STOP, 0, NULL);
648 } 675 }
649 if (err) 676 if (err)
@@ -669,8 +696,8 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
669 if ((key < 0) || (key >= 4)) 696 if ((key < 0) || (key >= 4))
670 return -EINVAL; 697 return -EINVAL;
671 698
672 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, 699 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
673 sizeof(tsc_arr), NULL, &tsc_arr); 700 sizeof(tsc_arr), NULL, &tsc_arr);
674 if (!err) 701 if (!err)
675 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0])); 702 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
676 703
@@ -849,7 +876,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
849 memcpy(key, priv->keys[i].key, 876 memcpy(key, priv->keys[i].key,
850 priv->keys[i].key_len); 877 priv->keys[i].key_len);
851 878
852 err = hermes_write_ltv(hw, USER_BAP, 879 err = hw->ops->write_ltv(hw, USER_BAP,
853 HERMES_RID_CNFDEFAULTKEY0 + i, 880 HERMES_RID_CNFDEFAULTKEY0 + i,
854 HERMES_BYTES_TO_RECLEN(keylen), 881 HERMES_BYTES_TO_RECLEN(keylen),
855 key); 882 key);
@@ -1066,7 +1093,7 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
1066 memcpy(mclist.addr[i++], ha->addr, ETH_ALEN); 1093 memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
1067 } 1094 }
1068 1095
1069 err = hermes_write_ltv(hw, USER_BAP, 1096 err = hw->ops->write_ltv(hw, USER_BAP,
1070 HERMES_RID_CNFGROUPADDRESSES, 1097 HERMES_RID_CNFGROUPADDRESSES,
1071 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN), 1098 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
1072 &mclist); 1099 &mclist);
@@ -1108,15 +1135,15 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
1108 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID : 1135 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
1109 HERMES_RID_CNFDESIREDSSID; 1136 HERMES_RID_CNFDESIREDSSID;
1110 1137
1111 err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf), 1138 err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
1112 NULL, &essidbuf); 1139 NULL, &essidbuf);
1113 if (err) 1140 if (err)
1114 goto fail_unlock; 1141 goto fail_unlock;
1115 } else { 1142 } else {
1116 *active = 0; 1143 *active = 0;
1117 1144
1118 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID, 1145 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
1119 sizeof(essidbuf), NULL, &essidbuf); 1146 sizeof(essidbuf), NULL, &essidbuf);
1120 if (err) 1147 if (err)
1121 goto fail_unlock; 1148 goto fail_unlock;
1122 } 1149 }
@@ -1187,8 +1214,8 @@ int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
1187 if (orinoco_lock(priv, &flags) != 0) 1214 if (orinoco_lock(priv, &flags) != 0)
1188 return -EBUSY; 1215 return -EBUSY;
1189 1216
1190 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES, 1217 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
1191 sizeof(list), NULL, &list); 1218 sizeof(list), NULL, &list);
1192 orinoco_unlock(priv, &flags); 1219 orinoco_unlock(priv, &flags);
1193 1220
1194 if (err) 1221 if (err)
@@ -1255,7 +1282,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
1255 idbuf.len = cpu_to_le16(len); 1282 idbuf.len = cpu_to_le16(len);
1256 memcpy(idbuf.val, ssid->ssid, len); 1283 memcpy(idbuf.val, ssid->ssid, len);
1257 1284
1258 err = hermes_write_ltv(hw, USER_BAP, 1285 err = hw->ops->write_ltv(hw, USER_BAP,
1259 HERMES_RID_CNFSCANSSID_AGERE, 1286 HERMES_RID_CNFSCANSSID_AGERE,
1260 HERMES_BYTES_TO_RECLEN(len + 2), 1287 HERMES_BYTES_TO_RECLEN(len + 2),
1261 &idbuf); 1288 &idbuf);
@@ -1319,8 +1346,8 @@ int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
1319 hermes_t *hw = &priv->hw; 1346 hermes_t *hw = &priv->hw;
1320 int err; 1347 int err;
1321 1348
1322 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, 1349 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1323 ETH_ALEN, NULL, addr); 1350 ETH_ALEN, NULL, addr);
1324 1351
1325 return err; 1352 return err;
1326} 1353}
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 413e9ab6cab3..1d60c7e4392a 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -254,7 +254,7 @@ void set_port_type(struct orinoco_private *priv)
254/* Device methods */ 254/* Device methods */
255/********************************************************************/ 255/********************************************************************/
256 256
257static int orinoco_open(struct net_device *dev) 257int orinoco_open(struct net_device *dev)
258{ 258{
259 struct orinoco_private *priv = ndev_priv(dev); 259 struct orinoco_private *priv = ndev_priv(dev);
260 unsigned long flags; 260 unsigned long flags;
@@ -272,8 +272,9 @@ static int orinoco_open(struct net_device *dev)
272 272
273 return err; 273 return err;
274} 274}
275EXPORT_SYMBOL(orinoco_open);
275 276
276static int orinoco_stop(struct net_device *dev) 277int orinoco_stop(struct net_device *dev)
277{ 278{
278 struct orinoco_private *priv = ndev_priv(dev); 279 struct orinoco_private *priv = ndev_priv(dev);
279 int err = 0; 280 int err = 0;
@@ -281,25 +282,27 @@ static int orinoco_stop(struct net_device *dev)
281 /* We mustn't use orinoco_lock() here, because we need to be 282 /* We mustn't use orinoco_lock() here, because we need to be
282 able to close the interface even if hw_unavailable is set 283 able to close the interface even if hw_unavailable is set
283 (e.g. as we're released after a PC Card removal) */ 284 (e.g. as we're released after a PC Card removal) */
284 spin_lock_irq(&priv->lock); 285 orinoco_lock_irq(priv);
285 286
286 priv->open = 0; 287 priv->open = 0;
287 288
288 err = __orinoco_down(priv); 289 err = __orinoco_down(priv);
289 290
290 spin_unlock_irq(&priv->lock); 291 orinoco_unlock_irq(priv);
291 292
292 return err; 293 return err;
293} 294}
295EXPORT_SYMBOL(orinoco_stop);
294 296
295static struct net_device_stats *orinoco_get_stats(struct net_device *dev) 297struct net_device_stats *orinoco_get_stats(struct net_device *dev)
296{ 298{
297 struct orinoco_private *priv = ndev_priv(dev); 299 struct orinoco_private *priv = ndev_priv(dev);
298 300
299 return &priv->stats; 301 return &priv->stats;
300} 302}
303EXPORT_SYMBOL(orinoco_get_stats);
301 304
302static void orinoco_set_multicast_list(struct net_device *dev) 305void orinoco_set_multicast_list(struct net_device *dev)
303{ 306{
304 struct orinoco_private *priv = ndev_priv(dev); 307 struct orinoco_private *priv = ndev_priv(dev);
305 unsigned long flags; 308 unsigned long flags;
@@ -313,8 +316,9 @@ static void orinoco_set_multicast_list(struct net_device *dev)
313 __orinoco_set_multicast_list(dev); 316 __orinoco_set_multicast_list(dev);
314 orinoco_unlock(priv, &flags); 317 orinoco_unlock(priv, &flags);
315} 318}
319EXPORT_SYMBOL(orinoco_set_multicast_list);
316 320
317static int orinoco_change_mtu(struct net_device *dev, int new_mtu) 321int orinoco_change_mtu(struct net_device *dev, int new_mtu)
318{ 322{
319 struct orinoco_private *priv = ndev_priv(dev); 323 struct orinoco_private *priv = ndev_priv(dev);
320 324
@@ -330,6 +334,7 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
330 334
331 return 0; 335 return 0;
332} 336}
337EXPORT_SYMBOL(orinoco_change_mtu);
333 338
334/********************************************************************/ 339/********************************************************************/
335/* Tx path */ 340/* Tx path */
@@ -400,8 +405,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
400 memset(&desc, 0, sizeof(desc)); 405 memset(&desc, 0, sizeof(desc));
401 406
402 *txcntl = cpu_to_le16(tx_control); 407 *txcntl = cpu_to_le16(tx_control);
403 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), 408 err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
404 txfid, 0); 409 txfid, 0);
405 if (err) { 410 if (err) {
406 if (net_ratelimit()) 411 if (net_ratelimit())
407 printk(KERN_ERR "%s: Error %d writing Tx " 412 printk(KERN_ERR "%s: Error %d writing Tx "
@@ -414,8 +419,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
414 memset(&desc, 0, sizeof(desc)); 419 memset(&desc, 0, sizeof(desc));
415 420
416 desc.tx_control = cpu_to_le16(tx_control); 421 desc.tx_control = cpu_to_le16(tx_control);
417 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), 422 err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
418 txfid, 0); 423 txfid, 0);
419 if (err) { 424 if (err) {
420 if (net_ratelimit()) 425 if (net_ratelimit())
421 printk(KERN_ERR "%s: Error %d writing Tx " 426 printk(KERN_ERR "%s: Error %d writing Tx "
@@ -458,8 +463,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
458 memcpy(eh, &hdr, sizeof(hdr)); 463 memcpy(eh, &hdr, sizeof(hdr));
459 } 464 }
460 465
461 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len, 466 err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
462 txfid, HERMES_802_3_OFFSET); 467 txfid, HERMES_802_3_OFFSET);
463 if (err) { 468 if (err) {
464 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 469 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
465 dev->name, err); 470 dev->name, err);
@@ -490,8 +495,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
490 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic); 495 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
491 496
492 /* Write the MIC */ 497 /* Write the MIC */
493 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len, 498 err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
494 txfid, HERMES_802_3_OFFSET + offset); 499 txfid, HERMES_802_3_OFFSET + offset);
495 if (err) { 500 if (err) {
496 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n", 501 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
497 dev->name, err); 502 dev->name, err);
@@ -502,7 +507,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
502 /* Finally, we actually initiate the send */ 507 /* Finally, we actually initiate the send */
503 netif_stop_queue(dev); 508 netif_stop_queue(dev);
504 509
505 err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, 510 err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
506 txfid, NULL); 511 txfid, NULL);
507 if (err) { 512 if (err) {
508 netif_start_queue(dev); 513 netif_start_queue(dev);
@@ -572,9 +577,9 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
572 return; /* Nothing's really happened */ 577 return; /* Nothing's really happened */
573 578
574 /* Read part of the frame header - we need status and addr1 */ 579 /* Read part of the frame header - we need status and addr1 */
575 err = hermes_bap_pread(hw, IRQ_BAP, &hdr, 580 err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
576 sizeof(struct hermes_txexc_data), 581 sizeof(struct hermes_txexc_data),
577 fid, 0); 582 fid, 0);
578 583
579 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); 584 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
580 stats->tx_errors++; 585 stats->tx_errors++;
@@ -615,7 +620,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
615 netif_wake_queue(dev); 620 netif_wake_queue(dev);
616} 621}
617 622
618static void orinoco_tx_timeout(struct net_device *dev) 623void orinoco_tx_timeout(struct net_device *dev)
619{ 624{
620 struct orinoco_private *priv = ndev_priv(dev); 625 struct orinoco_private *priv = ndev_priv(dev);
621 struct net_device_stats *stats = &priv->stats; 626 struct net_device_stats *stats = &priv->stats;
@@ -630,6 +635,7 @@ static void orinoco_tx_timeout(struct net_device *dev)
630 635
631 schedule_work(&priv->reset_work); 636 schedule_work(&priv->reset_work);
632} 637}
638EXPORT_SYMBOL(orinoco_tx_timeout);
633 639
634/********************************************************************/ 640/********************************************************************/
635/* Rx path (data frames) */ 641/* Rx path (data frames) */
@@ -764,9 +770,9 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
764 770
765 /* If any, copy the data from the card to the skb */ 771 /* If any, copy the data from the card to the skb */
766 if (datalen > 0) { 772 if (datalen > 0) {
767 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen), 773 err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
768 ALIGN(datalen, 2), rxfid, 774 ALIGN(datalen, 2), rxfid,
769 HERMES_802_2_OFFSET); 775 HERMES_802_2_OFFSET);
770 if (err) { 776 if (err) {
771 printk(KERN_ERR "%s: error %d reading monitor frame\n", 777 printk(KERN_ERR "%s: error %d reading monitor frame\n",
772 dev->name, err); 778 dev->name, err);
@@ -792,7 +798,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
792 stats->rx_dropped++; 798 stats->rx_dropped++;
793} 799}
794 800
795static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) 801void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
796{ 802{
797 struct orinoco_private *priv = ndev_priv(dev); 803 struct orinoco_private *priv = ndev_priv(dev);
798 struct net_device_stats *stats = &priv->stats; 804 struct net_device_stats *stats = &priv->stats;
@@ -814,8 +820,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
814 820
815 rxfid = hermes_read_regn(hw, RXFID); 821 rxfid = hermes_read_regn(hw, RXFID);
816 822
817 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc), 823 err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
818 rxfid, 0); 824 rxfid, 0);
819 if (err) { 825 if (err) {
820 printk(KERN_ERR "%s: error %d reading Rx descriptor. " 826 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
821 "Frame dropped.\n", dev->name, err); 827 "Frame dropped.\n", dev->name, err);
@@ -882,9 +888,9 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
882 nothing is removed. 2 is for aligning the IP header. */ 888 nothing is removed. 2 is for aligning the IP header. */
883 skb_reserve(skb, ETH_HLEN + 2); 889 skb_reserve(skb, ETH_HLEN + 2);
884 890
885 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length), 891 err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
886 ALIGN(length, 2), rxfid, 892 ALIGN(length, 2), rxfid,
887 HERMES_802_2_OFFSET); 893 HERMES_802_2_OFFSET);
888 if (err) { 894 if (err) {
889 printk(KERN_ERR "%s: error %d reading frame. " 895 printk(KERN_ERR "%s: error %d reading frame. "
890 "Frame dropped.\n", dev->name, err); 896 "Frame dropped.\n", dev->name, err);
@@ -913,6 +919,7 @@ update_stats:
913out: 919out:
914 kfree(desc); 920 kfree(desc);
915} 921}
922EXPORT_SYMBOL(__orinoco_ev_rx);
916 923
917static void orinoco_rx(struct net_device *dev, 924static void orinoco_rx(struct net_device *dev,
918 struct hermes_rx_descriptor *desc, 925 struct hermes_rx_descriptor *desc,
@@ -1145,9 +1152,9 @@ static void orinoco_join_ap(struct work_struct *work)
1145 goto out; 1152 goto out;
1146 1153
1147 /* Read scan results from the firmware */ 1154 /* Read scan results from the firmware */
1148 err = hermes_read_ltv(hw, USER_BAP, 1155 err = hw->ops->read_ltv(hw, USER_BAP,
1149 HERMES_RID_SCANRESULTSTABLE, 1156 HERMES_RID_SCANRESULTSTABLE,
1150 MAX_SCAN_LEN, &len, buf); 1157 MAX_SCAN_LEN, &len, buf);
1151 if (err) { 1158 if (err) {
1152 printk(KERN_ERR "%s: Cannot read scan results\n", 1159 printk(KERN_ERR "%s: Cannot read scan results\n",
1153 dev->name); 1160 dev->name);
@@ -1194,8 +1201,8 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1194 union iwreq_data wrqu; 1201 union iwreq_data wrqu;
1195 int err; 1202 int err;
1196 1203
1197 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, 1204 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1198 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1205 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1199 if (err != 0) 1206 if (err != 0)
1200 return; 1207 return;
1201 1208
@@ -1217,8 +1224,8 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1217 if (!priv->has_wpa) 1224 if (!priv->has_wpa)
1218 return; 1225 return;
1219 1226
1220 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO, 1227 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1221 sizeof(buf), NULL, &buf); 1228 sizeof(buf), NULL, &buf);
1222 if (err != 0) 1229 if (err != 0)
1223 return; 1230 return;
1224 1231
@@ -1247,8 +1254,9 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1247 if (!priv->has_wpa) 1254 if (!priv->has_wpa)
1248 return; 1255 return;
1249 1256
1250 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO, 1257 err = hw->ops->read_ltv(hw, USER_BAP,
1251 sizeof(buf), NULL, &buf); 1258 HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1259 sizeof(buf), NULL, &buf);
1252 if (err != 0) 1260 if (err != 0)
1253 return; 1261 return;
1254 1262
@@ -1353,7 +1361,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
1353 spin_unlock_irqrestore(&priv->scan_lock, flags); 1361 spin_unlock_irqrestore(&priv->scan_lock, flags);
1354} 1362}
1355 1363
1356static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) 1364void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1357{ 1365{
1358 struct orinoco_private *priv = ndev_priv(dev); 1366 struct orinoco_private *priv = ndev_priv(dev);
1359 u16 infofid; 1367 u16 infofid;
@@ -1371,8 +1379,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1371 infofid = hermes_read_regn(hw, INFOFID); 1379 infofid = hermes_read_regn(hw, INFOFID);
1372 1380
1373 /* Read the info frame header - don't try too hard */ 1381 /* Read the info frame header - don't try too hard */
1374 err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info), 1382 err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
1375 infofid, 0); 1383 infofid, 0);
1376 if (err) { 1384 if (err) {
1377 printk(KERN_ERR "%s: error %d reading info frame. " 1385 printk(KERN_ERR "%s: error %d reading info frame. "
1378 "Frame dropped.\n", dev->name, err); 1386 "Frame dropped.\n", dev->name, err);
@@ -1393,8 +1401,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1393 len = sizeof(tallies); 1401 len = sizeof(tallies);
1394 } 1402 }
1395 1403
1396 err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len, 1404 err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
1397 infofid, sizeof(info)); 1405 infofid, sizeof(info));
1398 if (err) 1406 if (err)
1399 break; 1407 break;
1400 1408
@@ -1429,8 +1437,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1429 break; 1437 break;
1430 } 1438 }
1431 1439
1432 err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len, 1440 err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
1433 infofid, sizeof(info)); 1441 infofid, sizeof(info));
1434 if (err) 1442 if (err)
1435 break; 1443 break;
1436 newstatus = le16_to_cpu(linkstatus.linkstatus); 1444 newstatus = le16_to_cpu(linkstatus.linkstatus);
@@ -1494,8 +1502,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1494 } 1502 }
1495 1503
1496 /* Read scan data */ 1504 /* Read scan data */
1497 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len, 1505 err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
1498 infofid, sizeof(info)); 1506 infofid, sizeof(info));
1499 if (err) { 1507 if (err) {
1500 kfree(buf); 1508 kfree(buf);
1501 qabort_scan(priv); 1509 qabort_scan(priv);
@@ -1547,8 +1555,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1547 break; 1555 break;
1548 1556
1549 /* Read scan data */ 1557 /* Read scan data */
1550 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len, 1558 err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
1551 infofid, sizeof(info)); 1559 infofid, sizeof(info));
1552 if (err) 1560 if (err)
1553 kfree(bss); 1561 kfree(bss);
1554 else 1562 else
@@ -1571,6 +1579,7 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1571 1579
1572 return; 1580 return;
1573} 1581}
1582EXPORT_SYMBOL(__orinoco_ev_info);
1574 1583
1575static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw) 1584static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
1576{ 1585{
@@ -1647,7 +1656,7 @@ static int orinoco_reinit_firmware(struct orinoco_private *priv)
1647 struct hermes *hw = &priv->hw; 1656 struct hermes *hw = &priv->hw;
1648 int err; 1657 int err;
1649 1658
1650 err = hermes_init(hw); 1659 err = hw->ops->init(hw);
1651 if (priv->do_fw_download && !err) { 1660 if (priv->do_fw_download && !err) {
1652 err = orinoco_download(priv); 1661 err = orinoco_download(priv);
1653 if (err) 1662 if (err)
@@ -1735,7 +1744,7 @@ void orinoco_reset(struct work_struct *work)
1735 } 1744 }
1736 1745
1737 /* This has to be called from user context */ 1746 /* This has to be called from user context */
1738 spin_lock_irq(&priv->lock); 1747 orinoco_lock_irq(priv);
1739 1748
1740 priv->hw_unavailable--; 1749 priv->hw_unavailable--;
1741 1750
@@ -1750,7 +1759,7 @@ void orinoco_reset(struct work_struct *work)
1750 dev->trans_start = jiffies; 1759 dev->trans_start = jiffies;
1751 } 1760 }
1752 1761
1753 spin_unlock_irq(&priv->lock); 1762 orinoco_unlock_irq(priv);
1754 1763
1755 return; 1764 return;
1756 disable: 1765 disable:
@@ -1984,7 +1993,7 @@ int orinoco_init(struct orinoco_private *priv)
1984 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN; 1993 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
1985 1994
1986 /* Initialize the firmware */ 1995 /* Initialize the firmware */
1987 err = hermes_init(hw); 1996 err = hw->ops->init(hw);
1988 if (err != 0) { 1997 if (err != 0) {
1989 dev_err(dev, "Failed to initialize firmware (err = %d)\n", 1998 dev_err(dev, "Failed to initialize firmware (err = %d)\n",
1990 err); 1999 err);
@@ -2067,9 +2076,9 @@ int orinoco_init(struct orinoco_private *priv)
2067 2076
2068 /* Make the hardware available, as long as it hasn't been 2077 /* Make the hardware available, as long as it hasn't been
2069 * removed elsewhere (e.g. by PCMCIA hot unplug) */ 2078 * removed elsewhere (e.g. by PCMCIA hot unplug) */
2070 spin_lock_irq(&priv->lock); 2079 orinoco_lock_irq(priv);
2071 priv->hw_unavailable--; 2080 priv->hw_unavailable--;
2072 spin_unlock_irq(&priv->lock); 2081 orinoco_unlock_irq(priv);
2073 2082
2074 dev_dbg(dev, "Ready\n"); 2083 dev_dbg(dev, "Ready\n");
2075 2084
@@ -2192,7 +2201,8 @@ EXPORT_SYMBOL(alloc_orinocodev);
2192 */ 2201 */
2193int orinoco_if_add(struct orinoco_private *priv, 2202int orinoco_if_add(struct orinoco_private *priv,
2194 unsigned long base_addr, 2203 unsigned long base_addr,
2195 unsigned int irq) 2204 unsigned int irq,
2205 const struct net_device_ops *ops)
2196{ 2206{
2197 struct wiphy *wiphy = priv_to_wiphy(priv); 2207 struct wiphy *wiphy = priv_to_wiphy(priv);
2198 struct wireless_dev *wdev; 2208 struct wireless_dev *wdev;
@@ -2211,12 +2221,17 @@ int orinoco_if_add(struct orinoco_private *priv,
2211 2221
2212 /* Setup / override net_device fields */ 2222 /* Setup / override net_device fields */
2213 dev->ieee80211_ptr = wdev; 2223 dev->ieee80211_ptr = wdev;
2214 dev->netdev_ops = &orinoco_netdev_ops;
2215 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2224 dev->watchdog_timeo = HZ; /* 1 second timeout */
2216 dev->wireless_handlers = &orinoco_handler_def; 2225 dev->wireless_handlers = &orinoco_handler_def;
2217#ifdef WIRELESS_SPY 2226#ifdef WIRELESS_SPY
2218 dev->wireless_data = &priv->wireless_data; 2227 dev->wireless_data = &priv->wireless_data;
2219#endif 2228#endif
2229 /* Default to standard ops if not set */
2230 if (ops)
2231 dev->netdev_ops = ops;
2232 else
2233 dev->netdev_ops = &orinoco_netdev_ops;
2234
2220 /* we use the default eth_mac_addr for setting the MAC addr */ 2235 /* we use the default eth_mac_addr for setting the MAC addr */
2221 2236
2222 /* Reserve space in skb for the SNAP header */ 2237 /* Reserve space in skb for the SNAP header */
@@ -2305,7 +2320,7 @@ int orinoco_up(struct orinoco_private *priv)
2305 unsigned long flags; 2320 unsigned long flags;
2306 int err; 2321 int err;
2307 2322
2308 spin_lock_irqsave(&priv->lock, flags); 2323 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
2309 2324
2310 err = orinoco_reinit_firmware(priv); 2325 err = orinoco_reinit_firmware(priv);
2311 if (err) { 2326 if (err) {
@@ -2325,7 +2340,7 @@ int orinoco_up(struct orinoco_private *priv)
2325 } 2340 }
2326 2341
2327exit: 2342exit:
2328 spin_unlock_irqrestore(&priv->lock, flags); 2343 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
2329 2344
2330 return 0; 2345 return 0;
2331} 2346}
@@ -2337,7 +2352,7 @@ void orinoco_down(struct orinoco_private *priv)
2337 unsigned long flags; 2352 unsigned long flags;
2338 int err; 2353 int err;
2339 2354
2340 spin_lock_irqsave(&priv->lock, flags); 2355 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
2341 err = __orinoco_down(priv); 2356 err = __orinoco_down(priv);
2342 if (err) 2357 if (err)
2343 printk(KERN_WARNING "%s: Error %d downing interface\n", 2358 printk(KERN_WARNING "%s: Error %d downing interface\n",
@@ -2345,7 +2360,7 @@ void orinoco_down(struct orinoco_private *priv)
2345 2360
2346 netif_device_detach(dev); 2361 netif_device_detach(dev);
2347 priv->hw_unavailable++; 2362 priv->hw_unavailable++;
2348 spin_unlock_irqrestore(&priv->lock, flags); 2363 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
2349} 2364}
2350EXPORT_SYMBOL(orinoco_down); 2365EXPORT_SYMBOL(orinoco_down);
2351 2366
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 21ab36cd76c7..4dadf9880a97 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -33,18 +33,6 @@ int orinoco_commit(struct orinoco_private *priv);
33void orinoco_reset(struct work_struct *work); 33void orinoco_reset(struct work_struct *work);
34 34
35/* Information element helpers - find a home for these... */ 35/* Information element helpers - find a home for these... */
36static inline u8 *orinoco_get_ie(u8 *data, size_t len,
37 enum ieee80211_eid eid)
38{
39 u8 *p = data;
40 while ((p + 2) < (data + len)) {
41 if (p[0] == eid)
42 return p;
43 p += p[1] + 2;
44 }
45 return NULL;
46}
47
48#define WPA_OUI_TYPE "\x00\x50\xF2\x01" 36#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
49#define WPA_SELECTOR_LEN 4 37#define WPA_SELECTOR_LEN 4
50static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len) 38static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 665ef56f8382..e9f415a56d4d 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -131,6 +131,8 @@ struct orinoco_private {
131 u16 ap_density, rts_thresh; 131 u16 ap_density, rts_thresh;
132 u16 pm_on, pm_mcast, pm_period, pm_timeout; 132 u16 pm_on, pm_mcast, pm_period, pm_timeout;
133 u16 preamble; 133 u16 preamble;
134 u16 short_retry_limit, long_retry_limit;
135 u16 retry_lifetime;
134#ifdef WIRELESS_SPY 136#ifdef WIRELESS_SPY
135 struct iw_spy_data spy_data; /* iwspy support */ 137 struct iw_spy_data spy_data; /* iwspy support */
136 struct iw_public_data wireless_data; 138 struct iw_public_data wireless_data;
@@ -188,12 +190,24 @@ extern void free_orinocodev(struct orinoco_private *priv);
188extern int orinoco_init(struct orinoco_private *priv); 190extern int orinoco_init(struct orinoco_private *priv);
189extern int orinoco_if_add(struct orinoco_private *priv, 191extern int orinoco_if_add(struct orinoco_private *priv,
190 unsigned long base_addr, 192 unsigned long base_addr,
191 unsigned int irq); 193 unsigned int irq,
194 const struct net_device_ops *ops);
192extern void orinoco_if_del(struct orinoco_private *priv); 195extern void orinoco_if_del(struct orinoco_private *priv);
193extern int orinoco_up(struct orinoco_private *priv); 196extern int orinoco_up(struct orinoco_private *priv);
194extern void orinoco_down(struct orinoco_private *priv); 197extern void orinoco_down(struct orinoco_private *priv);
195extern irqreturn_t orinoco_interrupt(int irq, void *dev_id); 198extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
196 199
200extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
201extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
202
203/* Common ndo functions exported for reuse by orinoco_usb */
204int orinoco_open(struct net_device *dev);
205int orinoco_stop(struct net_device *dev);
206struct net_device_stats *orinoco_get_stats(struct net_device *dev);
207void orinoco_set_multicast_list(struct net_device *dev);
208int orinoco_change_mtu(struct net_device *dev, int new_mtu);
209void orinoco_tx_timeout(struct net_device *dev);
210
197/********************************************************************/ 211/********************************************************************/
198/* Locking and synchronization functions */ 212/* Locking and synchronization functions */
199/********************************************************************/ 213/********************************************************************/
@@ -201,11 +215,11 @@ extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
201static inline int orinoco_lock(struct orinoco_private *priv, 215static inline int orinoco_lock(struct orinoco_private *priv,
202 unsigned long *flags) 216 unsigned long *flags)
203{ 217{
204 spin_lock_irqsave(&priv->lock, *flags); 218 priv->hw.ops->lock_irqsave(&priv->lock, flags);
205 if (priv->hw_unavailable) { 219 if (priv->hw_unavailable) {
206 DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n", 220 DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
207 priv->ndev); 221 priv->ndev);
208 spin_unlock_irqrestore(&priv->lock, *flags); 222 priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
209 return -EBUSY; 223 return -EBUSY;
210 } 224 }
211 return 0; 225 return 0;
@@ -214,7 +228,17 @@ static inline int orinoco_lock(struct orinoco_private *priv,
214static inline void orinoco_unlock(struct orinoco_private *priv, 228static inline void orinoco_unlock(struct orinoco_private *priv,
215 unsigned long *flags) 229 unsigned long *flags)
216{ 230{
217 spin_unlock_irqrestore(&priv->lock, *flags); 231 priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
232}
233
234static inline void orinoco_lock_irq(struct orinoco_private *priv)
235{
236 priv->hw.ops->lock_irq(&priv->lock);
237}
238
239static inline void orinoco_unlock_irq(struct orinoco_private *priv)
240{
241 priv->hw.ops->unlock_irq(&priv->lock);
218} 242}
219 243
220/*** Navigate from net_device to orinoco_private ***/ 244/*** Navigate from net_device to orinoco_private ***/
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index fdc961379170..f99b13ba92b3 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -296,7 +296,7 @@ orinoco_cs_config(struct pcmcia_device *link)
296 296
297 /* Register an interface with the stack */ 297 /* Register an interface with the stack */
298 if (orinoco_if_add(priv, link->io.BasePort1, 298 if (orinoco_if_add(priv, link->io.BasePort1,
299 link->irq.AssignedIRQ) != 0) { 299 link->irq.AssignedIRQ, NULL) != 0) {
300 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 300 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
301 goto failed; 301 goto failed;
302 } 302 }
@@ -327,9 +327,9 @@ orinoco_cs_release(struct pcmcia_device *link)
327 327
328 /* We're committed to taking the device away now, so mark the 328 /* We're committed to taking the device away now, so mark the
329 * hardware as unavailable */ 329 * hardware as unavailable */
330 spin_lock_irqsave(&priv->lock, flags); 330 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
331 priv->hw_unavailable++; 331 priv->hw_unavailable++;
332 spin_unlock_irqrestore(&priv->lock, flags); 332 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
333 333
334 pcmcia_disable_device(link); 334 pcmcia_disable_device(link);
335 if (priv->hw.iobase) 335 if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 075f446b3139..bc3ea0b67a4f 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -220,7 +220,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
220 goto fail; 220 goto fail;
221 } 221 }
222 222
223 err = orinoco_if_add(priv, 0, 0); 223 err = orinoco_if_add(priv, 0, 0, NULL);
224 if (err) { 224 if (err) {
225 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 225 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
226 goto fail; 226 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index bda5317cc596..468197f86673 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -170,7 +170,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
170 goto fail; 170 goto fail;
171 } 171 }
172 172
173 err = orinoco_if_add(priv, 0, 0); 173 err = orinoco_if_add(priv, 0, 0, NULL);
174 if (err) { 174 if (err) {
175 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 175 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
176 goto fail; 176 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index e0d5874ab42f..9358f4d2307b 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -259,7 +259,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
259 goto fail; 259 goto fail;
260 } 260 }
261 261
262 err = orinoco_if_add(priv, 0, 0); 262 err = orinoco_if_add(priv, 0, 0, NULL);
263 if (err) { 263 if (err) {
264 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 264 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
265 goto fail; 265 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 88cbc7902aa0..784605f0af15 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -156,7 +156,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
156 goto fail; 156 goto fail;
157 } 157 }
158 158
159 err = orinoco_if_add(priv, 0, 0); 159 err = orinoco_if_add(priv, 0, 0, NULL);
160 if (err) { 160 if (err) {
161 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 161 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
162 goto fail; 162 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
new file mode 100644
index 000000000000..e22093359f3e
--- /dev/null
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -0,0 +1,1800 @@
1/*
2 * USB Orinoco driver
3 *
4 * Copyright (c) 2003 Manuel Estrada Sainz
5 *
6 * The contents of this file are subject to the Mozilla Public License
7 * Version 1.1 (the "License"); you may not use this file except in
8 * compliance with the License. You may obtain a copy of the License
9 * at http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS"
12 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
13 * the License for the specific language governing rights and
14 * limitations under the License.
15 *
16 * Alternatively, the contents of this file may be used under the
17 * terms of the GNU General Public License version 2 (the "GPL"), in
18 * which case the provisions of the GPL are applicable instead of the
19 * above. If you wish to allow the use of your version of this file
20 * only under the terms of the GPL and not to allow others to use your
21 * version of this file under the MPL, indicate your decision by
22 * deleting the provisions above and replace them with the notice and
23 * other provisions required by the GPL. If you do not delete the
24 * provisions above, a recipient may use your version of this file
25 * under either the MPL or the GPL.
26 *
27 * Queueing code based on linux-wlan-ng 0.2.1-pre5
28 *
29 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
30 *
31 * The license is the same as above.
32 *
33 * Initialy based on USB Skeleton driver - 0.7
34 *
35 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
36 *
37 * This program is free software; you can redistribute it and/or
38 * modify it under the terms of the GNU General Public License as
39 * published by the Free Software Foundation; either version 2 of
40 * the License, or (at your option) any later version.
41 *
42 * NOTE: The original USB Skeleton driver is GPL, but all that code is
43 * gone so MPL/GPL applies.
44 */
45
46#define DRIVER_NAME "orinoco_usb"
47#define PFX DRIVER_NAME ": "
48
49#include <linux/module.h>
50#include <linux/kernel.h>
51#include <linux/sched.h>
52#include <linux/signal.h>
53#include <linux/errno.h>
54#include <linux/poll.h>
55#include <linux/init.h>
56#include <linux/slab.h>
57#include <linux/fcntl.h>
58#include <linux/spinlock.h>
59#include <linux/list.h>
60#include <linux/smp_lock.h>
61#include <linux/usb.h>
62#include <linux/timer.h>
63
64#include <linux/netdevice.h>
65#include <linux/if_arp.h>
66#include <linux/etherdevice.h>
67#include <linux/wireless.h>
68#include <linux/firmware.h>
69
70#include "orinoco.h"
71
72#ifndef URB_ASYNC_UNLINK
73#define URB_ASYNC_UNLINK 0
74#endif
75
76/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
77static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
78#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
79
80struct header_struct {
81 /* 802.3 */
82 u8 dest[ETH_ALEN];
83 u8 src[ETH_ALEN];
84 __be16 len;
85 /* 802.2 */
86 u8 dsap;
87 u8 ssap;
88 u8 ctrl;
89 /* SNAP */
90 u8 oui[3];
91 __be16 ethertype;
92} __attribute__ ((packed));
93
94struct ez_usb_fw {
95 u16 size;
96 const u8 *code;
97};
98
99static struct ez_usb_fw firmware = {
100 .size = 0,
101 .code = NULL,
102};
103
104#ifdef CONFIG_USB_DEBUG
105static int debug = 1;
106#else
107static int debug;
108#endif
109
110/* Debugging macros */
111#undef dbg
112#define dbg(format, arg...) \
113 do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \
114 __func__ , ## arg); } while (0)
115#undef err
116#define err(format, arg...) \
117 do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
118
119/* Module paramaters */
120module_param(debug, int, 0644);
121MODULE_PARM_DESC(debug, "Debug enabled or not");
122
123MODULE_FIRMWARE("orinoco_ezusb_fw");
124
125/*
126 * Under some conditions, the card gets stuck and stops paying attention
127 * to the world (i.e. data communication stalls) until we do something to
128 * it. Sending an INQ_TALLIES command seems to be enough and should be
129 * harmless otherwise. This behaviour has been observed when using the
130 * driver on a systemimager client during installation. In the past a
131 * timer was used to send INQ_TALLIES commands when there was no other
132 * activity, but it was troublesome and was removed.
133 */
134
/* USB vendor/product IDs of the Hermes-based adapters this driver binds to
 * (see ezusb_table below). */
#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */

#define USB_MELCO_VENDOR_ID 0x0411
#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */

#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */

#define USB_AVAYA8_VENDOR_ID 0x0D98
#define USB_AVAYAE_VENDOR_ID 0x0D9E
#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */

#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */
#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */
#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */

#define USB_ELSA_VENDOR_ID 0x05CC
#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */

#define USB_LEGEND_VENDOR_ID 0x0E7C
#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */

#define USB_SAMSUNG_VENDOR_ID 0x04E8
#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */

#define USB_IGATE_VENDOR_ID 0x0681
#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */

#define USB_FUJITSU_VENDOR_ID 0x0BF8
#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */

#define USB_2WIRE_VENDOR_ID 0x1630
#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */


/* Cypress EZ-USB vendor control requests; CPUCS is the register poked by
 * ezusb_8051_cpucs() to hold/release the on-board CPU in reset. */
#define EZUSB_REQUEST_FW_TRANS 0xA0
#define EZUSB_REQUEST_TRIGER 0xAA
#define EZUSB_REQUEST_TRIG_AC 0xAC
#define EZUSB_CPUCS_REG 0x7F92

/* Pseudo-RIDs used on the USB transport to tag request/reply frames
 * (carried in struct ezusb_packet.hermes_rid). */
#define EZUSB_RID_TX 0x0700
#define EZUSB_RID_RX 0x0701
#define EZUSB_RID_INIT1 0x0702
#define EZUSB_RID_ACK 0x0710
#define EZUSB_RID_READ_PDA 0x0800
#define EZUSB_RID_PROG_INIT 0x0852
#define EZUSB_RID_PROG_SET_ADDR 0x0853
#define EZUSB_RID_PROG_BYTES 0x0854
#define EZUSB_RID_PROG_END 0x0855
#define EZUSB_RID_DOCMD 0x0860

/* Recognize info frames */
#define EZUSB_IS_INFO(id) ((id >= 0xF000) && (id <= 0xF2FF))

/* Magic value carried in the first word of every ezusb_packet. */
#define EZUSB_MAGIC 0x0210

#define EZUSB_FRAME_DATA 1
#define EZUSB_FRAME_CONTROL 2

/* Timeout for control transfers and for the request/response timer. */
#define DEF_TIMEOUT (3*HZ)

/* Size of the bulk IN/OUT transfer buffers; also bounds one request. */
#define BULK_BUF_SIZE 2048

/* Largest firmware chunk that fits in one bulk transfer after the header. */
#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))

/* Firmware download parameters (see ezusb_firmware_download). */
#define FW_BUF_SIZE 64
#define FW_VAR_OFFSET_PTR 0x359
#define FW_VAR_VALUE 0
#define FW_HOLE_START 0x100
#define FW_HOLE_END 0x300
213
214struct ezusb_packet {
215 __le16 magic; /* 0x0210 */
216 u8 req_reply_count;
217 u8 ans_reply_count;
218 __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
219 __le16 size; /* transport size */
220 __le16 crc; /* CRC up to here */
221 __le16 hermes_len;
222 __le16 hermes_rid;
223 u8 data[0];
224} __attribute__ ((packed));
225
/* Table of devices that work or may work with this driver.
 * Exported via MODULE_DEVICE_TABLE so hotplug/udev can autoload us. */
static struct usb_device_id ezusb_table[] = {
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
	{USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
	{USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
	{USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
	{USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
	{USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
	{USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
	{USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
	/* Version-restricted match — presumably later revisions of this
	 * Samsung device are not Hermes-based; TODO confirm. */
	{USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
			0, 0)},
	{USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
	{USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
	{USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
	{USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
	{USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
	{USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
	{}			/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, ezusb_table);
253
/* Structure to hold all of our device specific stuff */
struct ezusb_priv {
	struct usb_device *udev;	/* cleared when the device goes away;
					 * checked before queuing new work */
	struct net_device *dev;
	struct mutex mtx;
	spinlock_t req_lock;		/* protects req_pending/req_active */
	struct list_head req_pending;	/* contexts waiting to be submitted */
	struct list_head req_active;	/* context(s) in flight — at most one,
					 * see ezusb_req_queue_run() */
	spinlock_t reply_count_lock;	/* serializes reply_count updates */
	u16 hermes_reg_fake[0x40];	/* simulated Hermes registers to keep
					 * orinoco.c happy (see ezusb_init) */
	u8 *bap_buf;
	struct urb *read_urb;		/* the single bulk IN URB */
	int read_pipe;
	int write_pipe;
	u8 reply_count;			/* rolling sequence number, 1..0x7F
					 * (see ezusb_reply_inc) */
};
270
/* Life cycle of a request context (struct request_context below). */
enum ezusb_state {
	EZUSB_CTX_START,		/* freshly allocated */
	EZUSB_CTX_QUEUED,		/* on req_pending, not yet submitted */
	EZUSB_CTX_REQ_SUBMITTED,	/* OUT URB submitted */
	EZUSB_CTX_REQ_COMPLETE,		/* OUT acked, waiting for the reply */
	EZUSB_CTX_RESP_RECEIVED,	/* reply arrived before the OUT ack */
	EZUSB_CTX_REQ_TIMEOUT,		/* OUT timed out, unlink in progress */
	EZUSB_CTX_REQ_FAILED,		/* OUT URB failed or was cancelled */
	EZUSB_CTX_RESP_TIMEOUT,		/* no reply within DEF_TIMEOUT */
	EZUSB_CTX_REQSUBMIT_FAIL,	/* usb_submit_urb() itself failed */
	EZUSB_CTX_COMPLETE,		/* request/reply cycle finished OK */
};
283
/* One outstanding request/reply exchange with the adapter.  Reference
 * counted because the submitter, the URB callback and the timeout timer
 * may each hold it; freed by ezusb_request_context_put(). */
struct request_context {
	struct list_head list;		/* on req_pending or req_active */
	atomic_t refcount;
	struct completion done;	/* Signals that CTX is dead */
	int killed;			/* set when the ctx was force-finished;
					 * tells the OUT callback to back off */
	struct urb *outurb;	/* OUT for req pkt */
	struct ezusb_priv *upriv;
	struct ezusb_packet *buf;	/* request frame; swapped with the IN
					 * buffer when the reply arrives */
	int buf_length;			/* valid bytes in buf after the swap */
	struct timer_list timer;	/* Timeout handling */
	enum ezusb_state state;	/* Current state */
	/* the RID that we will wait for */
	u16 out_rid;
	u16 in_rid;
};
299
300
301/* Forward declarations */
302static void ezusb_ctx_complete(struct request_context *ctx);
303static void ezusb_req_queue_run(struct ezusb_priv *upriv);
304static void ezusb_bulk_in_callback(struct urb *urb);
305
306static inline u8 ezusb_reply_inc(u8 count)
307{
308 if (count < 0x7F)
309 return count + 1;
310 else
311 return 1;
312}
313
/* Drop one reference to a request context and free it when the last
 * reference goes away.  By that point every waiter must already have
 * been woken and neither the OUT URB nor the timeout timer may still
 * be live, since both would touch freed memory. */
static void ezusb_request_context_put(struct request_context *ctx)
{
	if (!atomic_dec_and_test(&ctx->refcount))
		return;

	WARN_ON(!ctx->done.done);
	BUG_ON(ctx->outurb->status == -EINPROGRESS);
	BUG_ON(timer_pending(&ctx->timer));
	usb_free_urb(ctx->outurb);
	kfree(ctx->buf);
	kfree(ctx);
}
326
327static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
328 struct timer_list *timer,
329 unsigned long expire)
330{
331 if (!upriv->udev)
332 return;
333 mod_timer(timer, expire);
334}
335
336static void ezusb_request_timerfn(u_long _ctx)
337{
338 struct request_context *ctx = (void *) _ctx;
339
340 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
341 if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
342 ctx->state = EZUSB_CTX_REQ_TIMEOUT;
343 } else {
344 ctx->state = EZUSB_CTX_RESP_TIMEOUT;
345 dbg("couldn't unlink");
346 atomic_inc(&ctx->refcount);
347 ctx->killed = 1;
348 ezusb_ctx_complete(ctx);
349 ezusb_request_context_put(ctx);
350 }
351};
352
353static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
354 u16 out_rid, u16 in_rid)
355{
356 struct request_context *ctx;
357
358 ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
359 if (!ctx)
360 return NULL;
361
362 memset(ctx, 0, sizeof(*ctx));
363
364 ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
365 if (!ctx->buf) {
366 kfree(ctx);
367 return NULL;
368 }
369 ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
370 if (!ctx->outurb) {
371 kfree(ctx->buf);
372 kfree(ctx);
373 return NULL;
374 }
375
376 ctx->upriv = upriv;
377 ctx->state = EZUSB_CTX_START;
378 ctx->out_rid = out_rid;
379 ctx->in_rid = in_rid;
380
381 atomic_set(&ctx->refcount, 1);
382 init_completion(&ctx->done);
383
384 init_timer(&ctx->timer);
385 ctx->timer.function = ezusb_request_timerfn;
386 ctx->timer.data = (u_long) ctx;
387 return ctx;
388}
389
390
/* Poor man's complete_all(): complete() wakes a single waiter, so call
 * it enough times to release every thread that can possibly be blocked
 * on a context's completion.  Replace with the real complete_all() once
 * it is exported. */
static inline void ezusb_complete_all(struct completion *comp)
{
	int i;

	for (i = 0; i < 4; i++)
		complete(comp);
}
400
/* Finish a request context: unlink it from whichever request list it is
 * on, kick the queue so the next pending request can start, do the TX
 * accounting for data frames, wake every waiter and drop the queue's
 * reference.  Called from URB callbacks, the timeout timer and the
 * submit path. */
static void ezusb_ctx_complete(struct request_context *ctx)
{
	struct ezusb_priv *upriv = ctx->upriv;
	unsigned long flags;

	spin_lock_irqsave(&upriv->req_lock, flags);

	list_del_init(&ctx->list);
	if (upriv->udev) {
		/* Drop the lock while running the queue — it takes
		 * req_lock itself and may submit the next URB. */
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		ezusb_req_queue_run(upriv);
		spin_lock_irqsave(&upriv->req_lock, flags);
	}

	switch (ctx->state) {
	case EZUSB_CTX_COMPLETE:
	case EZUSB_CTX_REQSUBMIT_FAIL:
	case EZUSB_CTX_REQ_FAILED:
	case EZUSB_CTX_REQ_TIMEOUT:
	case EZUSB_CTX_RESP_TIMEOUT:
		/* Terminating states: the exchange is over, for good or ill. */
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
			struct net_device *dev = upriv->dev;
			struct orinoco_private *priv = ndev_priv(dev);
			struct net_device_stats *stats = &priv->stats;

			/* Anything but COMPLETE counts as a TX error. */
			if (ctx->state != EZUSB_CTX_COMPLETE)
				stats->tx_errors++;
			else
				stats->tx_packets++;

			netif_wake_queue(dev);
		}
		ezusb_complete_all(&ctx->done);
		ezusb_request_context_put(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		if (!upriv->udev) {
			/* This is normal, as all request contexts get flushed
			 * when the device is disconnected */
			err("Called, CTX not terminating, but device gone");
			ezusb_complete_all(&ctx->done);
			ezusb_request_context_put(ctx);
			break;
		}

		err("Called, CTX not in terminating state.");
		/* Things are really bad if this happens. Just leak
		 * the CTX because it may still be linked to the
		 * queue or the OUT urb may still be active.
		 * Just leaking at least prevents an Oops or Panic.
		 */
		break;
	}
}
459
/**
 * ezusb_req_queue_run:
 * Description:
 *	Pull the next queued context off req_pending, move it to
 *	req_active and submit its OUT URB.
 *	Note: Only one active CTX at any one time, because there's no
 *	other (reliable) way to match the response URB to the correct
 *	CTX.
 **/
static void ezusb_req_queue_run(struct ezusb_priv *upriv)
{
	unsigned long flags;
	struct request_context *ctx;
	int result;

	spin_lock_irqsave(&upriv->req_lock, flags);

	/* Nothing to do while a request is already in flight ... */
	if (!list_empty(&upriv->req_active))
		goto unlock;

	/* ... or when nothing is waiting. */
	if (list_empty(&upriv->req_pending))
		goto unlock;

	ctx =
	    list_entry(upriv->req_pending.next, struct request_context,
		       list);

	/* Don't start new work once the device is gone. */
	if (!ctx->upriv->udev)
		goto unlock;

	/* We need to split this off to avoid a race condition */
	list_move_tail(&ctx->list, &upriv->req_active);

	if (ctx->state == EZUSB_CTX_QUEUED) {
		/* Reference for the in-flight OUT URB / its callback. */
		atomic_inc(&ctx->refcount);
		result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
		if (result) {
			ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;

			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Fatal, failed to submit command urb."
			    " error=%d\n", result);

			ezusb_ctx_complete(ctx);
			ezusb_request_context_put(ctx);
			goto done;
		}

		ctx->state = EZUSB_CTX_REQ_SUBMITTED;
		ezusb_mod_timer(ctx->upriv, &ctx->timer,
				jiffies + DEF_TIMEOUT);
	}

 unlock:
	spin_unlock_irqrestore(&upriv->req_lock, flags);

 done:
	return;
}
518
519static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
520 struct request_context *ctx)
521{
522 unsigned long flags;
523
524 spin_lock_irqsave(&upriv->req_lock, flags);
525
526 if (!ctx->upriv->udev) {
527 spin_unlock_irqrestore(&upriv->req_lock, flags);
528 goto done;
529 }
530 atomic_inc(&ctx->refcount);
531 list_add_tail(&ctx->list, &upriv->req_pending);
532 spin_unlock_irqrestore(&upriv->req_lock, flags);
533
534 ctx->state = EZUSB_CTX_QUEUED;
535 ezusb_req_queue_run(upriv);
536
537 done:
538 return;
539}
540
/* Completion handler for the OUT (request) URB.  Advances the context
 * state machine — completing the context outright when the reply has
 * already been received or when the URB failed — and finally drops the
 * reference that was taken when the URB was submitted. */
static void ezusb_request_out_callback(struct urb *urb)
{
	unsigned long flags;
	enum ezusb_state state;
	struct request_context *ctx = urb->context;
	struct ezusb_priv *upriv = ctx->upriv;

	spin_lock_irqsave(&upriv->req_lock, flags);

	del_timer(&ctx->timer);

	/* The timer handler may have force-finished this context. */
	if (ctx->killed) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		pr_warning("interrupt called with dead ctx");
		goto out;
	}

	state = ctx->state;

	if (urb->status == 0) {
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
			if (ctx->in_rid) {
				ctx->state = EZUSB_CTX_REQ_COMPLETE;
				/* reply URB still pending */
				ezusb_mod_timer(upriv, &ctx->timer,
						jiffies + DEF_TIMEOUT);
				spin_unlock_irqrestore(&upriv->req_lock,
						       flags);
				break;
			}
			/* no reply expected — finished; fall through */
		case EZUSB_CTX_RESP_RECEIVED:
			/* IN already received before this OUT-ACK */
			ctx->state = EZUSB_CTX_COMPLETE;
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	} else {
		/* If someone cancels the OUT URB then its status
		 * should be either -ECONNRESET or -ENOENT.
		 */
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
		case EZUSB_CTX_RESP_RECEIVED:
			ctx->state = EZUSB_CTX_REQ_FAILED;
			/* fall through */

		case EZUSB_CTX_REQ_FAILED:
		case EZUSB_CTX_REQ_TIMEOUT:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	}
 out:
	ezusb_request_context_put(ctx);
}
614
/* Handle a reply frame arriving on the bulk IN pipe: match it against
 * the active request by sequence number and RID, swap the URB's buffer
 * with the context's request buffer (so the reply stays owned by the
 * context), and advance the state machine. */
static void ezusb_request_in_callback(struct ezusb_priv *upriv,
				      struct urb *urb)
{
	struct ezusb_packet *ans = urb->transfer_buffer;
	struct request_context *ctx = NULL;
	enum ezusb_state state;
	unsigned long flags;

	/* Find the CTX on the active queue that requested this URB */
	spin_lock_irqsave(&upriv->req_lock, flags);
	if (upriv->udev) {
		struct list_head *item;

		list_for_each(item, &upriv->req_active) {
			struct request_context *c;
			int reply_count;

			c = list_entry(item, struct request_context, list);
			/* The device answers with the request's sequence
			 * number advanced by one. */
			reply_count =
			    ezusb_reply_inc(c->buf->req_reply_count);
			if ((ans->ans_reply_count == reply_count)
			    && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
				ctx = c;
				break;
			}
			dbg("Skipped (0x%x/0x%x) (%d/%d)",
			    le16_to_cpu(ans->hermes_rid),
			    c->in_rid, ans->ans_reply_count, reply_count);
		}
	}

	if (ctx == NULL) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		err("%s: got unexpected RID: 0x%04X", __func__,
		    le16_to_cpu(ans->hermes_rid));
		ezusb_req_queue_run(upriv);
		return;
	}

	/* The data we want is in the in buffer, exchange */
	urb->transfer_buffer = ctx->buf;
	ctx->buf = (void *) ans;
	ctx->buf_length = urb->actual_length;

	state = ctx->state;
	switch (state) {
	case EZUSB_CTX_REQ_SUBMITTED:
		/* We have received our response URB before
		 * our request has been acknowledged. Do NOT
		 * destroy our CTX yet, because our OUT URB
		 * is still alive ...
		 */
		ctx->state = EZUSB_CTX_RESP_RECEIVED;
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Let the machine continue running. */
		break;

	case EZUSB_CTX_REQ_COMPLETE:
		/* This is the usual path: our request
		 * has already been acknowledged, and
		 * we have now received the reply.
		 */
		ctx->state = EZUSB_CTX_COMPLETE;

		/* Stop the reply-timeout timer */
		del_timer(&ctx->timer);
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Call the completion handler */
		ezusb_ctx_complete(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		pr_warning("Matched IN URB, unexpected context state(0x%x)",
			   state);
		/* Throw this CTX away and try submitting another */
		del_timer(&ctx->timer);
		ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
		usb_unlink_urb(ctx->outurb);
		ezusb_req_queue_run(upriv);
		break;
	}		/* switch */
}
701
702
/* Block until the context reaches a terminating state.  In softirq
 * context we must not sleep — and the timeout timer could not run while
 * we spin — so busy-wait for at most DEF_TIMEOUT instead.
 * NOTE(review): wait_event_interruptible() can return on a signal while
 * done.done is still 0; the caller then sees a non-terminal state and
 * reports the access as failed. */
static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
			       struct request_context *ctx)
{
	switch (ctx->state) {
	case EZUSB_CTX_QUEUED:
	case EZUSB_CTX_REQ_SUBMITTED:
	case EZUSB_CTX_REQ_COMPLETE:
	case EZUSB_CTX_RESP_RECEIVED:
		if (in_softirq()) {
			/* If we get called from a timer, timeout timers don't
			 * get the chance to run themselves. So we make sure
			 * that we don't sleep for ever */
			int msecs = DEF_TIMEOUT * (1000 / HZ);
			while (!ctx->done.done && msecs--)
				udelay(1000);
		} else {
			wait_event_interruptible(ctx->done.wait,
						 ctx->done.done);
		}
		break;
	default:
		/* Done or failed - nothing to wait for */
		break;
	}
}
728
729static inline u16 build_crc(struct ezusb_packet *data)
730{
731 u16 crc = 0;
732 u8 *bytes = (u8 *)data;
733 int i;
734
735 for (i = 0; i < 8; i++)
736 crc = (crc << 1) + bytes[i];
737
738 return crc;
739}
740
/**
 * ezusb_fill_req:
 *
 * Build a complete request frame (header + payload) in @req and return
 * its total size in bytes.
 *
 * if data == NULL and length > 0 the data is assumed to be already in
 * the target buffer and only the header is filled.
 *
 */
static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
			  const void *data, u16 frame_type, u8 reply_count)
{
	int total_size = sizeof(*req) + length;

	BUG_ON(total_size > BULK_BUF_SIZE);

	req->magic = cpu_to_le16(EZUSB_MAGIC);
	req->req_reply_count = reply_count;
	req->ans_reply_count = 0;
	req->frame_type = cpu_to_le16(frame_type);
	/* +4 presumably covers the hermes_len/hermes_rid words that
	 * follow the size field — matches the struct layout; confirm */
	req->size = cpu_to_le16(length + 4);
	/* crc covers the 8 header bytes preceding it, see build_crc() */
	req->crc = cpu_to_le16(build_crc(req));
	req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
	req->hermes_rid = cpu_to_le16(rid);
	if (data)
		memcpy(req->data, data, length);
	return total_size;
}
767
768static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
769{
770 int retval = 0;
771 void *cur_buf = upriv->read_urb->transfer_buffer;
772
773 if (upriv->read_urb->status == -EINPROGRESS) {
774 dbg("urb busy, not resubmiting");
775 retval = -EBUSY;
776 goto exit;
777 }
778 usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
779 cur_buf, BULK_BUF_SIZE,
780 ezusb_bulk_in_callback, upriv);
781 upriv->read_urb->transfer_flags = 0;
782 retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
783 if (retval)
784 err("%s submit failed %d", __func__, retval);
785
786 exit:
787 return retval;
788}
789
790static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
791{
792 u8 res_val = reset; /* avoid argument promotion */
793
794 if (!upriv->udev) {
795 err("%s: !upriv->udev", __func__);
796 return -EFAULT;
797 }
798 return usb_control_msg(upriv->udev,
799 usb_sndctrlpipe(upriv->udev, 0),
800 EZUSB_REQUEST_FW_TRANS,
801 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
802 USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
803 sizeof(res_val), DEF_TIMEOUT);
804}
805
806static int ezusb_firmware_download(struct ezusb_priv *upriv,
807 struct ez_usb_fw *fw)
808{
809 u8 fw_buffer[FW_BUF_SIZE];
810 int retval, addr;
811 int variant_offset;
812
813 /*
814 * This byte is 1 and should be replaced with 0. The offset is
815 * 0x10AD in version 0.0.6. The byte in question should follow
816 * the end of the code pointed to by the jump in the beginning
817 * of the firmware. Also, it is read by code located at 0x358.
818 */
819 variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
820 if (variant_offset >= fw->size) {
821 printk(KERN_ERR PFX "Invalid firmware variant offset: "
822 "0x%04x\n", variant_offset);
823 retval = -EINVAL;
824 goto fail;
825 }
826
827 retval = ezusb_8051_cpucs(upriv, 1);
828 if (retval < 0)
829 goto fail;
830 for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
831 /* 0x100-0x300 should be left alone, it contains card
832 * specific data, like USB enumeration information */
833 if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
834 continue;
835
836 memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
837 if (variant_offset >= addr &&
838 variant_offset < addr + FW_BUF_SIZE) {
839 dbg("Patching card_variant byte at 0x%04X",
840 variant_offset);
841 fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
842 }
843 retval = usb_control_msg(upriv->udev,
844 usb_sndctrlpipe(upriv->udev, 0),
845 EZUSB_REQUEST_FW_TRANS,
846 USB_TYPE_VENDOR | USB_RECIP_DEVICE
847 | USB_DIR_OUT,
848 addr, 0x0,
849 fw_buffer, FW_BUF_SIZE,
850 DEF_TIMEOUT);
851
852 if (retval < 0)
853 goto fail;
854 }
855 retval = ezusb_8051_cpucs(upriv, 0);
856 if (retval < 0)
857 goto fail;
858
859 goto exit;
860 fail:
861 printk(KERN_ERR PFX "Firmware download failed, error %d\n",
862 retval);
863 exit:
864 return retval;
865}
866
867static int ezusb_access_ltv(struct ezusb_priv *upriv,
868 struct request_context *ctx,
869 u16 length, const void *data, u16 frame_type,
870 void *ans_buff, int ans_size, u16 *ans_length)
871{
872 int req_size;
873 int retval = 0;
874 enum ezusb_state state;
875
876 BUG_ON(in_irq());
877
878 if (!upriv->udev) {
879 dbg("Device disconnected");
880 return -ENODEV;
881 }
882
883 if (upriv->read_urb->status != -EINPROGRESS)
884 err("%s: in urb not pending", __func__);
885
886 /* protect upriv->reply_count, guarantee sequential numbers */
887 spin_lock_bh(&upriv->reply_count_lock);
888 req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
889 frame_type, upriv->reply_count);
890 usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
891 ctx->buf, req_size,
892 ezusb_request_out_callback, ctx);
893
894 if (ctx->in_rid)
895 upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
896
897 ezusb_req_enqueue_run(upriv, ctx);
898
899 spin_unlock_bh(&upriv->reply_count_lock);
900
901 if (ctx->in_rid)
902 ezusb_req_ctx_wait(upriv, ctx);
903
904 state = ctx->state;
905 switch (state) {
906 case EZUSB_CTX_COMPLETE:
907 retval = ctx->outurb->status;
908 break;
909
910 case EZUSB_CTX_QUEUED:
911 case EZUSB_CTX_REQ_SUBMITTED:
912 if (!ctx->in_rid)
913 break;
914 default:
915 err("%s: Unexpected context state %d", __func__,
916 state);
917 /* fall though */
918 case EZUSB_CTX_REQ_TIMEOUT:
919 case EZUSB_CTX_REQ_FAILED:
920 case EZUSB_CTX_RESP_TIMEOUT:
921 case EZUSB_CTX_REQSUBMIT_FAIL:
922 printk(KERN_ERR PFX "Access failed, resetting (state %d,"
923 " reply_count %d)\n", state, upriv->reply_count);
924 upriv->reply_count = 0;
925 if (state == EZUSB_CTX_REQ_TIMEOUT
926 || state == EZUSB_CTX_RESP_TIMEOUT) {
927 printk(KERN_ERR PFX "ctx timed out\n");
928 retval = -ETIMEDOUT;
929 } else {
930 printk(KERN_ERR PFX "ctx failed\n");
931 retval = -EFAULT;
932 }
933 goto exit;
934 break;
935 }
936 if (ctx->in_rid) {
937 struct ezusb_packet *ans = ctx->buf;
938 int exp_len;
939
940 if (ans->hermes_len != 0)
941 exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
942 else
943 exp_len = 14;
944
945 if (exp_len != ctx->buf_length) {
946 err("%s: length mismatch for RID 0x%04x: "
947 "expected %d, got %d", __func__,
948 ctx->in_rid, exp_len, ctx->buf_length);
949 retval = -EIO;
950 goto exit;
951 }
952
953 if (ans_buff)
954 memcpy(ans_buff, ans->data,
955 min_t(int, exp_len, ans_size));
956 if (ans_length)
957 *ans_length = le16_to_cpu(ans->hermes_len);
958 }
959 exit:
960 ezusb_request_context_put(ctx);
961 return retval;
962}
963
964static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
965 u16 length, const void *data)
966{
967 struct ezusb_priv *upriv = hw->priv;
968 u16 frame_type;
969 struct request_context *ctx;
970
971 if (length == 0)
972 return -EINVAL;
973
974 length = HERMES_RECLEN_TO_BYTES(length);
975
976 /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
977 * set to be empty, but the USB bridge doesn't like it */
978 if (length == 0)
979 return 0;
980
981 ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
982 if (!ctx)
983 return -ENOMEM;
984
985 if (rid == EZUSB_RID_TX)
986 frame_type = EZUSB_FRAME_DATA;
987 else
988 frame_type = EZUSB_FRAME_CONTROL;
989
990 return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
991 NULL, 0, NULL);
992}
993
994static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
995 unsigned bufsize, u16 *length, void *buf)
996{
997 struct ezusb_priv *upriv = hw->priv;
998 struct request_context *ctx;
999
1000 if ((bufsize < 0) || (bufsize % 2))
1001 return -EINVAL;
1002
1003 ctx = ezusb_alloc_ctx(upriv, rid, rid);
1004 if (!ctx)
1005 return -ENOMEM;
1006
1007 return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
1008 buf, bufsize, length);
1009}
1010
1011static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
1012 u16 parm2, struct hermes_response *resp)
1013{
1014 struct ezusb_priv *upriv = hw->priv;
1015 struct request_context *ctx;
1016
1017 __le16 data[4] = {
1018 cpu_to_le16(cmd),
1019 cpu_to_le16(parm0),
1020 cpu_to_le16(parm1),
1021 cpu_to_le16(parm2),
1022 };
1023 dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X",
1024 cmd, parm0, parm1, parm2);
1025 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
1026 if (!ctx)
1027 return -ENOMEM;
1028
1029 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1030 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1031}
1032
1033static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
1034 struct hermes_response *resp)
1035{
1036 struct ezusb_priv *upriv = hw->priv;
1037 struct request_context *ctx;
1038
1039 __le16 data[4] = {
1040 cpu_to_le16(cmd),
1041 cpu_to_le16(parm0),
1042 0,
1043 0,
1044 };
1045 dbg("0x%04X, parm0 0x%04X", cmd, parm0);
1046 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
1047 if (!ctx)
1048 return -ENOMEM;
1049
1050 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1051 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1052}
1053
/* BAP read emulation: on this transport the whole frame already sits in
 * the bulk IN buffer, so "reading the BAP" is a bounds-checked memcpy
 * out of the last received packet.  Only RX data frames and info frames
 * are expected here.
 * NOTE(review): the two bounds failures return different codes
 * (-EINVAL vs -EFAULT) — looks unintentional but callers may only test
 * for non-zero; left as-is. */
static int ezusb_bap_pread(struct hermes *hw, int bap,
			   void *buf, int len, u16 id, u16 offset)
{
	struct ezusb_priv *upriv = hw->priv;
	struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
	int actual_length = upriv->read_urb->actual_length;

	if (id == EZUSB_RID_RX) {
		if ((sizeof(*ans) + offset + len) > actual_length) {
			printk(KERN_ERR PFX "BAP read beyond buffer end "
			       "in rx frame\n");
			return -EINVAL;
		}
		memcpy(buf, ans->data + offset, len);
		return 0;
	}

	if (EZUSB_IS_INFO(id)) {
		/* Include 4 bytes for length/type */
		if ((sizeof(*ans) + offset + len - 4) > actual_length) {
			printk(KERN_ERR PFX "BAP read beyond buffer end "
			       "in info frame\n");
			return -EFAULT;
		}
		/* presumably the caller's offset already counts the 4-byte
		 * length/type prefix, hence the -4 here — TODO confirm */
		memcpy(buf, ans->data + offset - 4, len);
	} else {
		printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
		return -EINVAL;
	}

	return 0;
}
1086
1087static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
1088 u32 pda_addr, u16 pda_len)
1089{
1090 struct ezusb_priv *upriv = hw->priv;
1091 struct request_context *ctx;
1092 __le16 data[] = {
1093 cpu_to_le16(pda_addr & 0xffff),
1094 cpu_to_le16(pda_len - 4)
1095 };
1096 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
1097 if (!ctx)
1098 return -ENOMEM;
1099
1100 /* wl_lkm does not include PDA size in the PDA area.
1101 * We will pad the information into pda, so other routines
1102 * don't have to be modified */
1103 pda[0] = cpu_to_le16(pda_len - 2);
1104 /* Includes CFG_PROD_DATA but not itself */
1105 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
1106
1107 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1108 EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
1109 NULL);
1110}
1111
1112static int ezusb_program_init(struct hermes *hw, u32 entry_point)
1113{
1114 struct ezusb_priv *upriv = hw->priv;
1115 struct request_context *ctx;
1116 __le32 data = cpu_to_le32(entry_point);
1117
1118 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
1119 if (!ctx)
1120 return -ENOMEM;
1121
1122 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1123 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1124}
1125
1126static int ezusb_program_end(struct hermes *hw)
1127{
1128 struct ezusb_priv *upriv = hw->priv;
1129 struct request_context *ctx;
1130
1131 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
1132 if (!ctx)
1133 return -ENOMEM;
1134
1135 return ezusb_access_ltv(upriv, ctx, 0, NULL,
1136 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1137}
1138
1139static int ezusb_program_bytes(struct hermes *hw, const char *buf,
1140 u32 addr, u32 len)
1141{
1142 struct ezusb_priv *upriv = hw->priv;
1143 struct request_context *ctx;
1144 __le32 data = cpu_to_le32(addr);
1145 int err;
1146
1147 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
1148 if (!ctx)
1149 return -ENOMEM;
1150
1151 err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1152 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1153 if (err)
1154 return err;
1155
1156 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
1157 if (!ctx)
1158 return -ENOMEM;
1159
1160 return ezusb_access_ltv(upriv, ctx, len, buf,
1161 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1162}
1163
1164static int ezusb_program(struct hermes *hw, const char *buf,
1165 u32 addr, u32 len)
1166{
1167 u32 ch_addr;
1168 u32 ch_len;
1169 int err = 0;
1170
1171 /* We can only send 2048 bytes out of the bulk xmit at a time,
1172 * so we have to split any programming into chunks of <2048
1173 * bytes. */
1174
1175 ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
1176 ch_addr = addr;
1177
1178 while (ch_addr < (addr + len)) {
1179 pr_debug("Programming subblock of length %d "
1180 "to address 0x%08x. Data @ %p\n",
1181 ch_len, ch_addr, &buf[ch_addr - addr]);
1182
1183 err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
1184 ch_addr, ch_len);
1185 if (err)
1186 break;
1187
1188 ch_addr += ch_len;
1189 ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
1190 (addr + len - ch_addr) : MAX_DL_SIZE;
1191 }
1192
1193 return err;
1194}
1195
/* ndo_start_xmit for the USB bridge: build a TX frame (802.3 or
 * SNAP-encapsulated Ethernet-II) in a request context and hand it to
 * the request machinery.  The queue is stopped per packet and woken
 * again from ezusb_ctx_complete() when the TX request terminates. */
static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	struct ezusb_priv *upriv = priv->card;
	int err = 0;
	char *p;
	struct ethhdr *eh;
	int len, data_len, data_off;
	__le16 tx_control;
	unsigned long flags;
	struct request_context *ctx;
	u8 *buf;
	int tx_size;

	if (!netif_running(dev)) {
		printk(KERN_ERR "%s: Tx on stopped device!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (netif_queue_stopped(dev)) {
		printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (orinoco_lock(priv, &flags) != 0) {
		printk(KERN_ERR
		       "%s: orinoco_xmit() called while hw_unavailable\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (!netif_carrier_ok(dev) ||
	    (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
		/* Oops, the firmware hasn't established a connection,
		   silently drop the packet (this seems to be the
		   safest approach). */
		stats->tx_errors++;
		orinoco_unlock(priv, &flags);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* TX requests expect no reply frame (in_rid == 0). */
	ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
	if (!ctx)
		goto fail;

	memset(ctx->buf, 0, BULK_BUF_SIZE);
	buf = ctx->buf->data;

	/* Length of the packet body */
	/* FIXME: what if the skb is smaller than this? */
	len = max_t(int, skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);

	eh = (struct ethhdr *) skb->data;

	/* Frame body starts with a (zero) tx_control word. */
	tx_control = cpu_to_le16(0);
	memcpy(buf, &tx_control, sizeof(tx_control));
	buf += sizeof(tx_control);
	/* Encapsulate Ethernet-II frames */
	if (ntohs(eh->h_proto) > ETH_DATA_LEN) {	/* Ethernet-II frame */
		struct header_struct *hdr = (void *) buf;
		buf += sizeof(*hdr);
		data_len = len;
		data_off = sizeof(tx_control) + sizeof(*hdr);
		p = skb->data + ETH_HLEN;

		/* 802.3 header */
		memcpy(hdr->dest, eh->h_dest, ETH_ALEN);
		memcpy(hdr->src, eh->h_source, ETH_ALEN);
		hdr->len = htons(data_len + ENCAPS_OVERHEAD);

		/* 802.2 header */
		memcpy(&hdr->dsap, &encaps_hdr, sizeof(encaps_hdr));

		hdr->ethertype = eh->h_proto;
	} else {		/* IEEE 802.3 frame */
		data_len = len + ETH_HLEN;
		data_off = sizeof(tx_control);
		p = skb->data;
	}

	memcpy(buf, p, data_len);
	buf += data_len;

	/* Finally, we actually initiate the send */
	netif_stop_queue(dev);

	/* The card may behave better if we send evenly sized usb transfers */
	tx_size = ALIGN(buf - ctx->buf->data, 2);

	err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
			       EZUSB_FRAME_DATA, NULL, 0, NULL);

	if (err) {
		netif_start_queue(dev);
		if (net_ratelimit())
			printk(KERN_ERR "%s: Error %d transmitting packet\n",
			       dev->name, err);
		stats->tx_errors++;
		goto fail;
	}

	dev->trans_start = jiffies;
	stats->tx_bytes += data_off + data_len;

	orinoco_unlock(priv, &flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;

 fail:
	/* NETDEV_TX_BUSY: the skb is NOT freed, the core will retry it. */
	orinoco_unlock(priv, &flags);
	return NETDEV_TX_BUSY;
}
1314
/* FID "allocation" stub: the USB bridge has no real FIDs, every TX uses
 * the fixed EZUSB_RID_TX pseudo-FID, so just hand that back. */
static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
{
	*fid = EZUSB_RID_TX;
	return 0;
}
1320
1321
1322static int ezusb_hard_reset(struct orinoco_private *priv)
1323{
1324 struct ezusb_priv *upriv = priv->card;
1325 int retval = ezusb_8051_cpucs(upriv, 1);
1326
1327 if (retval < 0) {
1328 err("Failed to reset");
1329 return retval;
1330 }
1331
1332 retval = ezusb_8051_cpucs(upriv, 0);
1333 if (retval < 0) {
1334 err("Failed to unreset");
1335 return retval;
1336 }
1337
1338 dbg("sending control message");
1339 retval = usb_control_msg(upriv->udev,
1340 usb_sndctrlpipe(upriv->udev, 0),
1341 EZUSB_REQUEST_TRIGER,
1342 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
1343 USB_DIR_OUT, 0x0, 0x0, NULL, 0,
1344 DEF_TIMEOUT);
1345 if (retval < 0) {
1346 err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
1347 return retval;
1348 }
1349#if 0
1350 dbg("Sending EZUSB_REQUEST_TRIG_AC");
1351 retval = usb_control_msg(upriv->udev,
1352 usb_sndctrlpipe(upriv->udev, 0),
1353 EZUSB_REQUEST_TRIG_AC,
1354 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
1355 USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
1356 DEF_TIMEOUT);
1357 if (retval < 0) {
1358 err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
1359 return retval;
1360 }
1361#endif
1362
1363 return 0;
1364}
1365
1366
/*
 * Hermes .init op: bring the (firmware-loaded) device to an
 * operational state.  Fakes the MAGIC value in the simulated
 * SWSUPPORT0 register so the generic orinoco code accepts the
 * hardware, restarts the bulk-IN URB, writes the INIT1 record and
 * finally issues HERMES_CMD_INIT.  Returns 0 on success or a negative
 * error code.  Must run in process context (BUG_ON guards).
 */
static int ezusb_init(hermes_t *hw)
{
	struct ezusb_priv *upriv = hw->priv;
	int retval;

	BUG_ON(in_interrupt());
	BUG_ON(!upriv);

	upriv->reply_count = 0;
	/* Write the MAGIC number on the simulated registers to keep
	 * orinoco.c happy */
	hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
	hermes_write_regn(hw, RXFID, EZUSB_RID_RX);

	/* Kill any URB left over from a previous init attempt before
	 * submitting a fresh one. */
	usb_kill_urb(upriv->read_urb);
	ezusb_submit_in_urb(upriv);

	retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
				 HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
	if (retval < 0) {
		printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
		return retval;
	}

	retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
	if (retval < 0) {
		printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
		return retval;
	}

	return 0;
}
1399
1400static void ezusb_bulk_in_callback(struct urb *urb)
1401{
1402 struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
1403 struct ezusb_packet *ans = urb->transfer_buffer;
1404 u16 crc;
1405 u16 hermes_rid;
1406
1407 if (upriv->udev == NULL) {
1408 dbg("disconnected");
1409 return;
1410 }
1411
1412 if (urb->status == -ETIMEDOUT) {
1413 /* When a device gets unplugged we get this every time
1414 * we resubmit, flooding the logs. Since we don't use
1415 * USB timeouts, it shouldn't happen any other time*/
1416 pr_warning("%s: urb timed out, not resubmiting", __func__);
1417 return;
1418 }
1419 if (urb->status == -ECONNABORTED) {
1420 pr_warning("%s: connection abort, resubmiting urb",
1421 __func__);
1422 goto resubmit;
1423 }
1424 if ((urb->status == -EILSEQ)
1425 || (urb->status == -ENOENT)
1426 || (urb->status == -ECONNRESET)) {
1427 dbg("status %d, not resubmiting", urb->status);
1428 return;
1429 }
1430 if (urb->status)
1431 dbg("status: %d length: %d",
1432 urb->status, urb->actual_length);
1433 if (urb->actual_length < sizeof(*ans)) {
1434 err("%s: short read, ignoring", __func__);
1435 goto resubmit;
1436 }
1437 crc = build_crc(ans);
1438 if (le16_to_cpu(ans->crc) != crc) {
1439 err("CRC error, ignoring packet");
1440 goto resubmit;
1441 }
1442
1443 hermes_rid = le16_to_cpu(ans->hermes_rid);
1444 if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
1445 ezusb_request_in_callback(upriv, urb);
1446 } else if (upriv->dev) {
1447 struct net_device *dev = upriv->dev;
1448 struct orinoco_private *priv = ndev_priv(dev);
1449 hermes_t *hw = &priv->hw;
1450
1451 if (hermes_rid == EZUSB_RID_RX) {
1452 __orinoco_ev_rx(dev, hw);
1453 } else {
1454 hermes_write_regn(hw, INFOFID,
1455 le16_to_cpu(ans->hermes_rid));
1456 __orinoco_ev_info(dev, hw);
1457 }
1458 }
1459
1460 resubmit:
1461 if (upriv->udev)
1462 ezusb_submit_in_urb(upriv);
1463}
1464
1465static inline void ezusb_delete(struct ezusb_priv *upriv)
1466{
1467 struct net_device *dev;
1468 struct list_head *item;
1469 struct list_head *tmp_item;
1470 unsigned long flags;
1471
1472 BUG_ON(in_interrupt());
1473 BUG_ON(!upriv);
1474
1475 dev = upriv->dev;
1476 mutex_lock(&upriv->mtx);
1477
1478 upriv->udev = NULL; /* No timer will be rearmed from here */
1479
1480 usb_kill_urb(upriv->read_urb);
1481
1482 spin_lock_irqsave(&upriv->req_lock, flags);
1483 list_for_each_safe(item, tmp_item, &upriv->req_active) {
1484 struct request_context *ctx;
1485 int err;
1486
1487 ctx = list_entry(item, struct request_context, list);
1488 atomic_inc(&ctx->refcount);
1489
1490 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
1491 err = usb_unlink_urb(ctx->outurb);
1492
1493 spin_unlock_irqrestore(&upriv->req_lock, flags);
1494 if (err == -EINPROGRESS)
1495 wait_for_completion(&ctx->done);
1496
1497 del_timer_sync(&ctx->timer);
1498 /* FIXME: there is an slight chance for the irq handler to
1499 * be running */
1500 if (!list_empty(&ctx->list))
1501 ezusb_ctx_complete(ctx);
1502
1503 ezusb_request_context_put(ctx);
1504 spin_lock_irqsave(&upriv->req_lock, flags);
1505 }
1506 spin_unlock_irqrestore(&upriv->req_lock, flags);
1507
1508 list_for_each_safe(item, tmp_item, &upriv->req_pending)
1509 ezusb_ctx_complete(list_entry(item,
1510 struct request_context, list));
1511
1512 if (upriv->read_urb->status == -EINPROGRESS)
1513 printk(KERN_ERR PFX "Some URB in progress\n");
1514
1515 mutex_unlock(&upriv->mtx);
1516
1517 kfree(upriv->read_urb->transfer_buffer);
1518 if (upriv->bap_buf != NULL)
1519 kfree(upriv->bap_buf);
1520 if (upriv->read_urb != NULL)
1521 usb_free_urb(upriv->read_urb);
1522 if (upriv->dev) {
1523 struct orinoco_private *priv = ndev_priv(upriv->dev);
1524 orinoco_if_del(priv);
1525 free_orinocodev(priv);
1526 }
1527}
1528
/* Hermes .lock_irqsave op: this backend takes a BH-disabling spinlock
 * rather than an IRQ-disabling one; the 'flags' argument required by
 * the ops signature is unused. */
static void ezusb_lock_irqsave(spinlock_t *lock,
			       unsigned long *flags) __acquires(lock)
{
	spin_lock_bh(lock);
}
1534
/* Hermes .unlock_irqrestore op: releases the BH-disabling spinlock
 * taken by ezusb_lock_irqsave(); 'flags' is unused. */
static void ezusb_unlock_irqrestore(spinlock_t *lock,
				    unsigned long *flags) __releases(lock)
{
	spin_unlock_bh(lock);
}
1540
/* Hermes .lock_irq op: again mapped to a BH-disabling spinlock. */
static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
{
	spin_lock_bh(lock);
}
1545
/* Hermes .unlock_irq op: releases the lock taken by ezusb_lock_irq(). */
static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
{
	spin_unlock_bh(lock);
}
1550
/* Hermes operations implemented by the EZUSB transport.  The lock ops
 * use BH-disabling spinlocks (see the ezusb_lock_* wrappers). */
static const struct hermes_ops ezusb_ops = {
	.init = ezusb_init,
	.cmd_wait = ezusb_docmd_wait,
	.init_cmd_wait = ezusb_doicmd_wait,
	.allocate = ezusb_allocate,
	.read_ltv = ezusb_read_ltv,
	.write_ltv = ezusb_write_ltv,
	.bap_pread = ezusb_bap_pread,
	.read_pda = ezusb_read_pda,
	.program_init = ezusb_program_init,
	.program_end = ezusb_program_end,
	.program = ezusb_program,
	.lock_irqsave = ezusb_lock_irqsave,
	.unlock_irqrestore = ezusb_unlock_irqrestore,
	.lock_irq = ezusb_lock_irq,
	.unlock_irq = ezusb_unlock_irq,
};
1568
/* net_device callbacks: generic orinoco handlers throughout, except
 * transmit, which goes through the USB request path (ezusb_xmit). */
static const struct net_device_ops ezusb_netdev_ops = {
	.ndo_open = orinoco_open,
	.ndo_stop = orinoco_stop,
	.ndo_start_xmit = ezusb_xmit,
	.ndo_set_multicast_list = orinoco_set_multicast_list,
	.ndo_change_mtu = orinoco_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = orinoco_tx_timeout,
	.ndo_get_stats = orinoco_get_stats,
};
1580
1581static int ezusb_probe(struct usb_interface *interface,
1582 const struct usb_device_id *id)
1583{
1584 struct usb_device *udev = interface_to_usbdev(interface);
1585 struct orinoco_private *priv;
1586 hermes_t *hw;
1587 struct ezusb_priv *upriv = NULL;
1588 struct usb_interface_descriptor *iface_desc;
1589 struct usb_endpoint_descriptor *ep;
1590 const struct firmware *fw_entry;
1591 int retval = 0;
1592 int i;
1593
1594 priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
1595 ezusb_hard_reset, NULL);
1596 if (!priv) {
1597 err("Couldn't allocate orinocodev");
1598 goto exit;
1599 }
1600
1601 hw = &priv->hw;
1602
1603 upriv = priv->card;
1604
1605 mutex_init(&upriv->mtx);
1606 spin_lock_init(&upriv->reply_count_lock);
1607
1608 spin_lock_init(&upriv->req_lock);
1609 INIT_LIST_HEAD(&upriv->req_pending);
1610 INIT_LIST_HEAD(&upriv->req_active);
1611
1612 upriv->udev = udev;
1613
1614 hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
1615 hw->reg_spacing = HERMES_16BIT_REGSPACING;
1616 hw->priv = upriv;
1617 hw->ops = &ezusb_ops;
1618
1619 /* set up the endpoint information */
1620 /* check out the endpoints */
1621
1622 iface_desc = &interface->altsetting[0].desc;
1623 for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
1624 ep = &interface->altsetting[0].endpoint[i].desc;
1625
1626 if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1627 == USB_DIR_IN) &&
1628 ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1629 == USB_ENDPOINT_XFER_BULK)) {
1630 /* we found a bulk in endpoint */
1631 if (upriv->read_urb != NULL) {
1632 pr_warning("Found a second bulk in ep, ignored");
1633 continue;
1634 }
1635
1636 upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
1637 if (!upriv->read_urb) {
1638 err("No free urbs available");
1639 goto error;
1640 }
1641 if (le16_to_cpu(ep->wMaxPacketSize) != 64)
1642 pr_warning("bulk in: wMaxPacketSize!= 64");
1643 if (ep->bEndpointAddress != (2 | USB_DIR_IN))
1644 pr_warning("bulk in: bEndpointAddress: %d",
1645 ep->bEndpointAddress);
1646 upriv->read_pipe = usb_rcvbulkpipe(udev,
1647 ep->
1648 bEndpointAddress);
1649 upriv->read_urb->transfer_buffer =
1650 kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
1651 if (!upriv->read_urb->transfer_buffer) {
1652 err("Couldn't allocate IN buffer");
1653 goto error;
1654 }
1655 }
1656
1657 if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1658 == USB_DIR_OUT) &&
1659 ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1660 == USB_ENDPOINT_XFER_BULK)) {
1661 /* we found a bulk out endpoint */
1662 if (upriv->bap_buf != NULL) {
1663 pr_warning("Found a second bulk out ep, ignored");
1664 continue;
1665 }
1666
1667 if (le16_to_cpu(ep->wMaxPacketSize) != 64)
1668 pr_warning("bulk out: wMaxPacketSize != 64");
1669 if (ep->bEndpointAddress != 2)
1670 pr_warning("bulk out: bEndpointAddress: %d",
1671 ep->bEndpointAddress);
1672 upriv->write_pipe = usb_sndbulkpipe(udev,
1673 ep->
1674 bEndpointAddress);
1675 upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
1676 if (!upriv->bap_buf) {
1677 err("Couldn't allocate bulk_out_buffer");
1678 goto error;
1679 }
1680 }
1681 }
1682 if (!upriv->bap_buf || !upriv->read_urb) {
1683 err("Didn't find the required bulk endpoints");
1684 goto error;
1685 }
1686
1687 if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
1688 &interface->dev) == 0) {
1689 firmware.size = fw_entry->size;
1690 firmware.code = fw_entry->data;
1691 }
1692 if (firmware.size && firmware.code) {
1693 ezusb_firmware_download(upriv, &firmware);
1694 } else {
1695 err("No firmware to download");
1696 goto error;
1697 }
1698
1699 if (ezusb_hard_reset(priv) < 0) {
1700 err("Cannot reset the device");
1701 goto error;
1702 }
1703
1704 /* If the firmware is already downloaded orinoco.c will call
1705 * ezusb_init but if the firmware is not already there, that will make
1706 * the kernel very unstable, so we try initializing here and quit in
1707 * case of error */
1708 if (ezusb_init(hw) < 0) {
1709 err("Couldn't initialize the device");
1710 err("Firmware may not be downloaded or may be wrong.");
1711 goto error;
1712 }
1713
1714 /* Initialise the main driver */
1715 if (orinoco_init(priv) != 0) {
1716 err("orinoco_init() failed\n");
1717 goto error;
1718 }
1719
1720 if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
1721 upriv->dev = NULL;
1722 err("%s: orinoco_if_add() failed", __func__);
1723 goto error;
1724 }
1725 upriv->dev = priv->ndev;
1726
1727 goto exit;
1728
1729 error:
1730 ezusb_delete(upriv);
1731 if (upriv->dev) {
1732 /* upriv->dev was 0, so ezusb_delete() didn't free it */
1733 free_orinocodev(priv);
1734 }
1735 upriv = NULL;
1736 retval = -EFAULT;
1737 exit:
1738 if (fw_entry) {
1739 firmware.code = NULL;
1740 firmware.size = 0;
1741 release_firmware(fw_entry);
1742 }
1743 usb_set_intfdata(interface, upriv);
1744 return retval;
1745}
1746
1747
/*
 * USB disconnect callback: clear the interface data first so nothing
 * else picks up a stale pointer, then tear the device down.
 * NOTE(review): ezusb_delete() BUG()s on a NULL upriv; this relies on
 * probe never binding successfully with intfdata unset — verify
 * against ezusb_probe()'s failure paths.
 */
static void ezusb_disconnect(struct usb_interface *intf)
{
	struct ezusb_priv *upriv = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	ezusb_delete(upriv);
	printk(KERN_INFO PFX "Disconnected\n");
}
1755
1756
/* usb specific object needed to register this driver with the usb
 * subsystem; probe/disconnect are matched against ezusb_table. */
static struct usb_driver orinoco_driver = {
	.name = DRIVER_NAME,
	.probe = ezusb_probe,
	.disconnect = ezusb_disconnect,
	.id_table = ezusb_table,
};
1764
/* Version banner printed at module load.  Can't be declared "const"
 * or the whole __initdata section will become const. */
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
    " (Manuel Estrada Sainz)";
1769
1770static int __init ezusb_module_init(void)
1771{
1772 int err;
1773
1774 printk(KERN_DEBUG "%s\n", version);
1775
1776 /* register this driver with the USB subsystem */
1777 err = usb_register(&orinoco_driver);
1778 if (err < 0) {
1779 printk(KERN_ERR PFX "usb_register failed, error %d\n",
1780 err);
1781 return err;
1782 }
1783
1784 return 0;
1785}
1786
/* Module exit point: unregister from the USB core, which in turn
 * invokes ezusb_disconnect() for any still-bound interfaces. */
static void __exit ezusb_module_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&orinoco_driver);
}
1792
1793
/* Standard module plumbing and metadata. */
module_init(ezusb_module_init);
module_exit(ezusb_module_exit);

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION
    ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 330d42d45333..4300d9db7d8c 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -127,7 +127,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
127{ 127{
128 struct wiphy *wiphy = priv_to_wiphy(priv); 128 struct wiphy *wiphy = priv_to_wiphy(priv);
129 struct ieee80211_channel *channel; 129 struct ieee80211_channel *channel;
130 u8 *ie; 130 const u8 *ie;
131 u64 timestamp; 131 u64 timestamp;
132 s32 signal; 132 s32 signal;
133 u16 capability; 133 u16 capability;
@@ -136,7 +136,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
136 int chan, freq; 136 int chan, freq;
137 137
138 ie_len = len - sizeof(*bss); 138 ie_len = len - sizeof(*bss);
139 ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS); 139 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
140 chan = ie ? ie[2] : 0; 140 chan = ie ? ie[2] : 0;
141 freq = ieee80211_dsss_chan_to_freq(chan); 141 freq = ieee80211_dsss_chan_to_freq(chan);
142 channel = ieee80211_get_channel(wiphy, freq); 142 channel = ieee80211_get_channel(wiphy, freq);
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 59bda240fdc2..9b1af4976bf5 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -349,6 +349,7 @@ spectrum_cs_config(struct pcmcia_device *link)
349 goto failed; 349 goto failed;
350 350
351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
352 hw->eeprom_pda = true;
352 353
353 /* 354 /*
354 * This actually configures the PCMCIA socket -- setting up 355 * This actually configures the PCMCIA socket -- setting up
@@ -374,7 +375,7 @@ spectrum_cs_config(struct pcmcia_device *link)
374 375
375 /* Register an interface with the stack */ 376 /* Register an interface with the stack */
376 if (orinoco_if_add(priv, link->io.BasePort1, 377 if (orinoco_if_add(priv, link->io.BasePort1,
377 link->irq.AssignedIRQ) != 0) { 378 link->irq.AssignedIRQ, NULL) != 0) {
378 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 379 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
379 goto failed; 380 goto failed;
380 } 381 }
@@ -405,9 +406,9 @@ spectrum_cs_release(struct pcmcia_device *link)
405 406
406 /* We're committed to taking the device away now, so mark the 407 /* We're committed to taking the device away now, so mark the
407 * hardware as unavailable */ 408 * hardware as unavailable */
408 spin_lock_irqsave(&priv->lock, flags); 409 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
409 priv->hw_unavailable++; 410 priv->hw_unavailable++;
410 spin_unlock_irqrestore(&priv->lock, flags); 411 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
411 412
412 pcmcia_disable_device(link); 413 pcmcia_disable_device(link);
413 if (priv->hw.iobase) 414 if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 57b850ebfeb2..5775124e2aee 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -458,7 +458,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
458 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 458 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
459 /* Fast channel change - no commit if successful */ 459 /* Fast channel change - no commit if successful */
460 hermes_t *hw = &priv->hw; 460 hermes_t *hw = &priv->hw;
461 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 461 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
462 HERMES_TEST_SET_CHANNEL, 462 HERMES_TEST_SET_CHANNEL,
463 chan, NULL); 463 chan, NULL);
464 } 464 }
@@ -538,125 +538,6 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
538 return -EINPROGRESS; /* Call commit handler */ 538 return -EINPROGRESS; /* Call commit handler */
539} 539}
540 540
541static int orinoco_ioctl_setrts(struct net_device *dev,
542 struct iw_request_info *info,
543 struct iw_param *rrq,
544 char *extra)
545{
546 struct orinoco_private *priv = ndev_priv(dev);
547 int val = rrq->value;
548 unsigned long flags;
549
550 if (rrq->disabled)
551 val = 2347;
552
553 if ((val < 0) || (val > 2347))
554 return -EINVAL;
555
556 if (orinoco_lock(priv, &flags) != 0)
557 return -EBUSY;
558
559 priv->rts_thresh = val;
560 orinoco_unlock(priv, &flags);
561
562 return -EINPROGRESS; /* Call commit handler */
563}
564
565static int orinoco_ioctl_getrts(struct net_device *dev,
566 struct iw_request_info *info,
567 struct iw_param *rrq,
568 char *extra)
569{
570 struct orinoco_private *priv = ndev_priv(dev);
571
572 rrq->value = priv->rts_thresh;
573 rrq->disabled = (rrq->value == 2347);
574 rrq->fixed = 1;
575
576 return 0;
577}
578
579static int orinoco_ioctl_setfrag(struct net_device *dev,
580 struct iw_request_info *info,
581 struct iw_param *frq,
582 char *extra)
583{
584 struct orinoco_private *priv = ndev_priv(dev);
585 int err = -EINPROGRESS; /* Call commit handler */
586 unsigned long flags;
587
588 if (orinoco_lock(priv, &flags) != 0)
589 return -EBUSY;
590
591 if (priv->has_mwo) {
592 if (frq->disabled)
593 priv->mwo_robust = 0;
594 else {
595 if (frq->fixed)
596 printk(KERN_WARNING "%s: Fixed fragmentation "
597 "is not supported on this firmware. "
598 "Using MWO robust instead.\n",
599 dev->name);
600 priv->mwo_robust = 1;
601 }
602 } else {
603 if (frq->disabled)
604 priv->frag_thresh = 2346;
605 else {
606 if ((frq->value < 256) || (frq->value > 2346))
607 err = -EINVAL;
608 else
609 /* must be even */
610 priv->frag_thresh = frq->value & ~0x1;
611 }
612 }
613
614 orinoco_unlock(priv, &flags);
615
616 return err;
617}
618
619static int orinoco_ioctl_getfrag(struct net_device *dev,
620 struct iw_request_info *info,
621 struct iw_param *frq,
622 char *extra)
623{
624 struct orinoco_private *priv = ndev_priv(dev);
625 hermes_t *hw = &priv->hw;
626 int err;
627 u16 val;
628 unsigned long flags;
629
630 if (orinoco_lock(priv, &flags) != 0)
631 return -EBUSY;
632
633 if (priv->has_mwo) {
634 err = hermes_read_wordrec(hw, USER_BAP,
635 HERMES_RID_CNFMWOROBUST_AGERE,
636 &val);
637 if (err)
638 val = 0;
639
640 frq->value = val ? 2347 : 0;
641 frq->disabled = !val;
642 frq->fixed = 0;
643 } else {
644 err = hermes_read_wordrec(hw, USER_BAP,
645 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
646 &val);
647 if (err)
648 val = 0;
649
650 frq->value = val;
651 frq->disabled = (val >= 2346);
652 frq->fixed = 1;
653 }
654
655 orinoco_unlock(priv, &flags);
656
657 return err;
658}
659
660static int orinoco_ioctl_setrate(struct net_device *dev, 541static int orinoco_ioctl_setrate(struct net_device *dev,
661 struct iw_request_info *info, 542 struct iw_request_info *info,
662 struct iw_param *rrq, 543 struct iw_param *rrq,
@@ -1201,60 +1082,6 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
1201 return ret; 1082 return ret;
1202} 1083}
1203 1084
1204static int orinoco_ioctl_getretry(struct net_device *dev,
1205 struct iw_request_info *info,
1206 struct iw_param *rrq,
1207 char *extra)
1208{
1209 struct orinoco_private *priv = ndev_priv(dev);
1210 hermes_t *hw = &priv->hw;
1211 int err = 0;
1212 u16 short_limit, long_limit, lifetime;
1213 unsigned long flags;
1214
1215 if (orinoco_lock(priv, &flags) != 0)
1216 return -EBUSY;
1217
1218 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
1219 &short_limit);
1220 if (err)
1221 goto out;
1222
1223 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
1224 &long_limit);
1225 if (err)
1226 goto out;
1227
1228 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
1229 &lifetime);
1230 if (err)
1231 goto out;
1232
1233 rrq->disabled = 0; /* Can't be disabled */
1234
1235 /* Note : by default, display the retry number */
1236 if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
1237 rrq->flags = IW_RETRY_LIFETIME;
1238 rrq->value = lifetime * 1000; /* ??? */
1239 } else {
1240 /* By default, display the min number */
1241 if ((rrq->flags & IW_RETRY_LONG)) {
1242 rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
1243 rrq->value = long_limit;
1244 } else {
1245 rrq->flags = IW_RETRY_LIMIT;
1246 rrq->value = short_limit;
1247 if (short_limit != long_limit)
1248 rrq->flags |= IW_RETRY_SHORT;
1249 }
1250 }
1251
1252 out:
1253 orinoco_unlock(priv, &flags);
1254
1255 return err;
1256}
1257
1258static int orinoco_ioctl_reset(struct net_device *dev, 1085static int orinoco_ioctl_reset(struct net_device *dev,
1259 struct iw_request_info *info, 1086 struct iw_request_info *info,
1260 void *wrqu, 1087 void *wrqu,
@@ -1446,8 +1273,8 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
1446 if (orinoco_lock(priv, &flags) != 0) 1273 if (orinoco_lock(priv, &flags) != 0)
1447 return -EBUSY; 1274 return -EBUSY;
1448 1275
1449 err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length, 1276 err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
1450 extra); 1277 extra);
1451 if (err) 1278 if (err)
1452 goto out; 1279 goto out;
1453 1280
@@ -1528,11 +1355,11 @@ static const iw_handler orinoco_handler[] = {
1528 IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid), 1355 IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
1529 IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate), 1356 IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
1530 IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate), 1357 IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
1531 IW_HANDLER(SIOCSIWRTS, (iw_handler)orinoco_ioctl_setrts), 1358 IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
1532 IW_HANDLER(SIOCGIWRTS, (iw_handler)orinoco_ioctl_getrts), 1359 IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
1533 IW_HANDLER(SIOCSIWFRAG, (iw_handler)orinoco_ioctl_setfrag), 1360 IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
1534 IW_HANDLER(SIOCGIWFRAG, (iw_handler)orinoco_ioctl_getfrag), 1361 IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
1535 IW_HANDLER(SIOCGIWRETRY, (iw_handler)orinoco_ioctl_getretry), 1362 IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
1536 IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode), 1363 IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
1537 IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode), 1364 IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
1538 IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower), 1365 IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7bbd9d3bba60..c072f41747ca 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,8 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
546 IEEE80211_HW_SUPPORTS_PS | 546 IEEE80211_HW_SUPPORTS_PS |
547 IEEE80211_HW_PS_NULLFUNC_STACK | 547 IEEE80211_HW_PS_NULLFUNC_STACK |
548 IEEE80211_HW_BEACON_FILTER | 548 IEEE80211_HW_BEACON_FILTER |
549 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 549 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
550 IEEE80211_HW_NOISE_DBM;
551 550
552 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 551 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
553 BIT(NL80211_IFTYPE_ADHOC) | 552 BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 86f3e9ac4c7a..07c4528f6e6b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
140 140
141 idx = le32_to_cpu(ring_control->host_idx[ring_index]); 141 idx = le32_to_cpu(ring_control->host_idx[ring_index]);
142 limit = idx; 142 limit = idx;
143 limit -= le32_to_cpu(index); 143 limit -= index;
144 limit = ring_limit - limit; 144 limit = ring_limit - limit;
145 145
146 i = idx % ring_limit; 146 i = idx % ring_limit;
@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
246 u32 idx, i; 246 u32 idx, i;
247 247
248 i = (*index) % ring_limit; 248 i = (*index) % ring_limit;
249 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); 249 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
250 idx %= ring_limit; 250 idx %= ring_limit;
251 251
252 while (i != idx) { 252 while (i != idx) {
@@ -277,6 +277,14 @@ static void p54p_tasklet(unsigned long dev_id)
277 struct p54p_priv *priv = dev->priv; 277 struct p54p_priv *priv = dev->priv;
278 struct p54p_ring_control *ring_control = priv->ring_control; 278 struct p54p_ring_control *ring_control = priv->ring_control;
279 279
280 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
281 ARRAY_SIZE(ring_control->tx_mgmt),
282 priv->tx_buf_mgmt);
283
284 p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
285 ARRAY_SIZE(ring_control->tx_data),
286 priv->tx_buf_data);
287
280 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, 288 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
281 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); 289 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
282 290
@@ -285,14 +293,6 @@ static void p54p_tasklet(unsigned long dev_id)
285 293
286 wmb(); 294 wmb();
287 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); 295 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
288
289 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
290 ARRAY_SIZE(ring_control->tx_mgmt),
291 priv->tx_buf_mgmt);
292
293 p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
294 ARRAY_SIZE(ring_control->tx_data),
295 priv->tx_buf_data);
296} 296}
297 297
298static irqreturn_t p54p_interrupt(int irq, void *dev_id) 298static irqreturn_t p54p_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 2ceff5480355..4e6891099d43 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -350,7 +350,6 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
350 rx_status->flag |= RX_FLAG_MMIC_ERROR; 350 rx_status->flag |= RX_FLAG_MMIC_ERROR;
351 351
352 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); 352 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
353 rx_status->noise = priv->noise;
354 if (hdr->rate & 0x10) 353 if (hdr->rate & 0x10)
355 rx_status->flag |= RX_FLAG_SHORTPRE; 354 rx_status->flag |= RX_FLAG_SHORTPRE;
356 if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) 355 if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 5239e082cd0f..eea1ef2f502b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -87,7 +87,7 @@ if RT2800PCI
87 87
88config RT2800PCI_RT30XX 88config RT2800PCI_RT30XX
89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices" 89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
90 default n 90 default y
91 ---help--- 91 ---help---
92 This adds support for rt30xx wireless chipset family to the 92 This adds support for rt30xx wireless chipset family to the
93 rt2800pci driver. 93 rt2800pci driver.
@@ -156,7 +156,7 @@ if RT2800USB
156 156
157config RT2800USB_RT30XX 157config RT2800USB_RT30XX
158 bool "rt2800usb - Include support for rt30xx (USB) devices" 158 bool "rt2800usb - Include support for rt30xx (USB) devices"
159 default n 159 default y
160 ---help--- 160 ---help---
161 This adds support for rt30xx wireless chipset family to the 161 This adds support for rt30xx wireless chipset family to the
162 rt2800usb driver. 162 rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index cdbf59108ef9..06b92f8b7a55 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1018,8 +1018,8 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1018 rt2x00_desc_write(entry_priv->desc, 1, word); 1018 rt2x00_desc_write(entry_priv->desc, 1, word);
1019 1019
1020 rt2x00_desc_read(txd, 2, &word); 1020 rt2x00_desc_read(txd, 2, &word);
1021 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len); 1021 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
1022 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len); 1022 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
1023 rt2x00_desc_write(txd, 2, word); 1023 rt2x00_desc_write(txd, 2, word);
1024 1024
1025 rt2x00_desc_read(txd, 3, &word); 1025 rt2x00_desc_read(txd, 3, &word);
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 89e986f449da..ae8e205df269 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1209,7 +1209,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1209 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1209 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1210 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1210 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1211 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1211 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1212 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1212 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1213 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1213 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1214 rt2x00_desc_write(txd, 0, word); 1214 rt2x00_desc_write(txd, 0, word);
1215} 1215}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 7185cb05f257..41d9996c80e6 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1072,7 +1072,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1072 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1072 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1073 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1073 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1074 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1074 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1075 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1075 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1076 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); 1076 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
1077 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); 1077 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
1078 rt2x00_desc_write(txd, 0, word); 1078 rt2x00_desc_write(txd, 0, word);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index ec893721cc80..2aa03751c341 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -107,7 +107,7 @@
107/* 107/*
108 * INT_SOURCE_CSR: Interrupt source register. 108 * INT_SOURCE_CSR: Interrupt source register.
109 * Write one to clear corresponding bit. 109 * Write one to clear corresponding bit.
110 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c 110 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read TX_STA_FIFO
111 */ 111 */
112#define INT_SOURCE_CSR 0x0200 112#define INT_SOURCE_CSR 0x0200
113#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001) 113#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
@@ -845,7 +845,7 @@
845 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz 845 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
846 */ 846 */
847#define TX_BAND_CFG 0x132c 847#define TX_BAND_CFG 0x132c
848#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001) 848#define TX_BAND_CFG_HT40_MINUS FIELD32(0x00000001)
849#define TX_BAND_CFG_A FIELD32(0x00000002) 849#define TX_BAND_CFG_A FIELD32(0x00000002)
850#define TX_BAND_CFG_BG FIELD32(0x00000004) 850#define TX_BAND_CFG_BG FIELD32(0x00000004)
851 851
@@ -1519,7 +1519,7 @@ struct mac_iveiv_entry {
1519 * BBP 3: RX Antenna 1519 * BBP 3: RX Antenna
1520 */ 1520 */
1521#define BBP3_RX_ANTENNA FIELD8(0x18) 1521#define BBP3_RX_ANTENNA FIELD8(0x18)
1522#define BBP3_HT40_PLUS FIELD8(0x20) 1522#define BBP3_HT40_MINUS FIELD8(0x20)
1523 1523
1524/* 1524/*
1525 * BBP 4: Bandwidth 1525 * BBP 4: Bandwidth
@@ -1566,6 +1566,11 @@ struct mac_iveiv_entry {
1566#define RFCSR12_TX_POWER FIELD8(0x1f) 1566#define RFCSR12_TX_POWER FIELD8(0x1f)
1567 1567
1568/* 1568/*
1569 * RFCSR 13:
1570 */
1571#define RFCSR13_TX_POWER FIELD8(0x1f)
1572
1573/*
1569 * RFCSR 15: 1574 * RFCSR 15:
1570 */ 1575 */
1571#define RFCSR15_TX_LO2_EN FIELD8(0x08) 1576#define RFCSR15_TX_LO2_EN FIELD8(0x08)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 2648f315a934..e37bbeab9233 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -41,9 +41,6 @@
41#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) 41#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
42#include "rt2x00usb.h" 42#include "rt2x00usb.h"
43#endif 43#endif
44#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
45#include "rt2x00pci.h"
46#endif
47#include "rt2800lib.h" 44#include "rt2800lib.h"
48#include "rt2800.h" 45#include "rt2800.h"
49#include "rt2800usb.h" 46#include "rt2800usb.h"
@@ -76,6 +73,23 @@ MODULE_LICENSE("GPL");
76 rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \ 73 rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
77 H2M_MAILBOX_CSR_OWNER, (__reg)) 74 H2M_MAILBOX_CSR_OWNER, (__reg))
78 75
76static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
77{
78 /* check for rt2872 on SoC */
79 if (!rt2x00_is_soc(rt2x00dev) ||
80 !rt2x00_rt(rt2x00dev, RT2872))
81 return false;
82
83 /* we know for sure that these rf chipsets are used on rt305x boards */
84 if (rt2x00_rf(rt2x00dev, RF3020) ||
85 rt2x00_rf(rt2x00dev, RF3021) ||
86 rt2x00_rf(rt2x00dev, RF3022))
87 return true;
88
89 NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
90 return false;
91}
92
79static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, 93static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
80 const unsigned int word, const u8 value) 94 const unsigned int word, const u8 value)
81{ 95{
@@ -794,6 +808,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
794 TXPOWER_G_TO_DEV(info->tx_power1)); 808 TXPOWER_G_TO_DEV(info->tx_power1));
795 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); 809 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
796 810
811 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
812 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
813 TXPOWER_G_TO_DEV(info->tx_power2));
814 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
815
797 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 816 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
798 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); 817 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
799 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); 818 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -849,7 +868,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
849 } 868 }
850 869
851 rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg); 870 rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
852 rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf)); 871 rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
853 rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14); 872 rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
854 rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14); 873 rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
855 rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg); 874 rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
@@ -882,7 +901,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
882 rt2800_bbp_write(rt2x00dev, 4, bbp); 901 rt2800_bbp_write(rt2x00dev, 4, bbp);
883 902
884 rt2800_bbp_read(rt2x00dev, 3, &bbp); 903 rt2800_bbp_read(rt2x00dev, 3, &bbp);
885 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 904 rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
886 rt2800_bbp_write(rt2x00dev, 3, bbp); 905 rt2800_bbp_write(rt2x00dev, 3, bbp);
887 906
888 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 907 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
@@ -1551,6 +1570,9 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1551 rt2800_wait_bbp_ready(rt2x00dev))) 1570 rt2800_wait_bbp_ready(rt2x00dev)))
1552 return -EACCES; 1571 return -EACCES;
1553 1572
1573 if (rt2800_is_305x_soc(rt2x00dev))
1574 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1575
1554 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 1576 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
1555 rt2800_bbp_write(rt2x00dev, 66, 0x38); 1577 rt2800_bbp_write(rt2x00dev, 66, 0x38);
1556 1578
@@ -1571,6 +1593,9 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1571 rt2800_bbp_write(rt2x00dev, 79, 0x13); 1593 rt2800_bbp_write(rt2x00dev, 79, 0x13);
1572 rt2800_bbp_write(rt2x00dev, 80, 0x05); 1594 rt2800_bbp_write(rt2x00dev, 80, 0x05);
1573 rt2800_bbp_write(rt2x00dev, 81, 0x33); 1595 rt2800_bbp_write(rt2x00dev, 81, 0x33);
1596 } else if (rt2800_is_305x_soc(rt2x00dev)) {
1597 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1598 rt2800_bbp_write(rt2x00dev, 80, 0x08);
1574 } else { 1599 } else {
1575 rt2800_bbp_write(rt2x00dev, 81, 0x37); 1600 rt2800_bbp_write(rt2x00dev, 81, 0x37);
1576 } 1601 }
@@ -1591,12 +1616,16 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1591 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || 1616 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
1592 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 1617 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
1593 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 1618 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
1594 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) 1619 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
1620 rt2800_is_305x_soc(rt2x00dev))
1595 rt2800_bbp_write(rt2x00dev, 103, 0xc0); 1621 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
1596 else 1622 else
1597 rt2800_bbp_write(rt2x00dev, 103, 0x00); 1623 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1598 1624
1599 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1625 if (rt2800_is_305x_soc(rt2x00dev))
1626 rt2800_bbp_write(rt2x00dev, 105, 0x01);
1627 else
1628 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1600 rt2800_bbp_write(rt2x00dev, 106, 0x35); 1629 rt2800_bbp_write(rt2x00dev, 106, 0x35);
1601 1630
1602 if (rt2x00_rt(rt2x00dev, RT3071) || 1631 if (rt2x00_rt(rt2x00dev, RT3071) ||
@@ -1613,11 +1642,6 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1613 rt2800_bbp_write(rt2x00dev, 138, value); 1642 rt2800_bbp_write(rt2x00dev, 138, value);
1614 } 1643 }
1615 1644
1616 if (rt2x00_rt(rt2x00dev, RT2872)) {
1617 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1618 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1619 rt2800_bbp_write(rt2x00dev, 80, 0x08);
1620 }
1621 1645
1622 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 1646 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
1623 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 1647 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -1703,7 +1727,8 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1703 if (!rt2x00_rt(rt2x00dev, RT3070) && 1727 if (!rt2x00_rt(rt2x00dev, RT3070) &&
1704 !rt2x00_rt(rt2x00dev, RT3071) && 1728 !rt2x00_rt(rt2x00dev, RT3071) &&
1705 !rt2x00_rt(rt2x00dev, RT3090) && 1729 !rt2x00_rt(rt2x00dev, RT3090) &&
1706 !rt2x00_rt(rt2x00dev, RT3390)) 1730 !rt2x00_rt(rt2x00dev, RT3390) &&
1731 !rt2800_is_305x_soc(rt2x00dev))
1707 return 0; 1732 return 0;
1708 1733
1709 /* 1734 /*
@@ -1771,6 +1796,40 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1771 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f); 1796 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
1772 rt2800_rfcsr_write(rt2x00dev, 30, 0x20); 1797 rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
1773 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f); 1798 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
1799 } else if (rt2800_is_305x_soc(rt2x00dev)) {
1800 rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
1801 rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
1802 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
1803 rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
1804 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1805 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
1806 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
1807 rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
1808 rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
1809 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
1810 rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
1811 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
1812 rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
1813 rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
1814 rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
1815 rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
1816 rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
1817 rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
1818 rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
1819 rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
1820 rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
1821 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
1822 rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
1823 rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
1824 rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
1825 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
1826 rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
1827 rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
1828 rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
1829 rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
1830 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
1831 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
1832 return 0;
1774 } 1833 }
1775 1834
1776 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 1835 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -1986,7 +2045,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1986 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 2045 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
1987 } else if (rt2x00_rt(rt2x00dev, RT2860) || 2046 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
1988 rt2x00_rt(rt2x00dev, RT2870) || 2047 rt2x00_rt(rt2x00dev, RT2870) ||
1989 rt2x00_rt(rt2x00dev, RT2872) ||
1990 rt2x00_rt(rt2x00dev, RT2872)) { 2048 rt2x00_rt(rt2x00dev, RT2872)) {
1991 /* 2049 /*
1992 * There is a max of 2 RX streams for RT28x0 series 2050 * There is a max of 2 RX streams for RT28x0 series
@@ -2318,8 +2376,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2318 else 2376 else
2319 spec->ht.ht_supported = false; 2377 spec->ht.ht_supported = false;
2320 2378
2379 /*
2380 * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
2381 * reception problems with HT40 capable 11n APs
2382 */
2321 spec->ht.cap = 2383 spec->ht.cap =
2322 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
2323 IEEE80211_HT_CAP_GRN_FLD | 2384 IEEE80211_HT_CAP_GRN_FLD |
2324 IEEE80211_HT_CAP_SGI_20 | 2385 IEEE80211_HT_CAP_SGI_20 |
2325 IEEE80211_HT_CAP_SGI_40 | 2386 IEEE80211_HT_CAP_SGI_40 |
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 2131f8f0c502..f08b6a37bf2d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -613,15 +613,23 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
613/* 613/*
614 * TX descriptor initialization 614 * TX descriptor initialization
615 */ 615 */
616static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 616static int rt2800pci_write_tx_data(struct queue_entry* entry,
617 struct sk_buff *skb, 617 struct txentry_desc *txdesc)
618 struct txentry_desc *txdesc)
619{ 618{
620 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 619 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
621 __le32 *txd = skbdesc->desc; 620 struct sk_buff *skb = entry->skb;
622 __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom); 621 struct skb_frame_desc *skbdesc;
622 int ret;
623 __le32 *txwi;
623 u32 word; 624 u32 word;
624 625
626 ret = rt2x00pci_write_tx_data(entry, txdesc);
627 if (ret)
628 return ret;
629
630 skbdesc = get_skb_frame_desc(skb);
631 txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom);
632
625 /* 633 /*
626 * Initialize TX Info descriptor 634 * Initialize TX Info descriptor
627 */ 635 */
@@ -655,7 +663,7 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
655 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 663 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
656 txdesc->key_idx : 0xff); 664 txdesc->key_idx : 0xff);
657 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 665 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
658 skb->len - txdesc->l2pad); 666 txdesc->length);
659 rt2x00_set_field32(&word, TXWI_W1_PACKETID, 667 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
660 skbdesc->entry->queue->qid + 1); 668 skbdesc->entry->queue->qid + 1);
661 rt2x00_desc_write(txwi, 1, word); 669 rt2x00_desc_write(txwi, 1, word);
@@ -670,6 +678,18 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
670 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); 678 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
671 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); 679 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
672 680
681 return 0;
682}
683
684
685static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
686 struct sk_buff *skb,
687 struct txentry_desc *txdesc)
688{
689 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
690 __le32 *txd = skbdesc->desc;
691 u32 word;
692
673 /* 693 /*
674 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 694 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
675 * must contains a TXWI structure + 802.11 header + padding + 802.11 695 * must contains a TXWI structure + 802.11 header + padding + 802.11
@@ -875,10 +895,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
875 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) + 895 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
876 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2; 896 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
877 897
878 rxdesc->noise =
879 (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
880 rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
881
882 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT); 898 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
883 899
884 /* 900 /*
@@ -1135,7 +1151,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1135 .reset_tuner = rt2800_reset_tuner, 1151 .reset_tuner = rt2800_reset_tuner,
1136 .link_tuner = rt2800_link_tuner, 1152 .link_tuner = rt2800_link_tuner,
1137 .write_tx_desc = rt2800pci_write_tx_desc, 1153 .write_tx_desc = rt2800pci_write_tx_desc,
1138 .write_tx_data = rt2x00pci_write_tx_data, 1154 .write_tx_data = rt2800pci_write_tx_data,
1139 .write_beacon = rt2800pci_write_beacon, 1155 .write_beacon = rt2800pci_write_beacon,
1140 .kick_tx_queue = rt2800pci_kick_tx_queue, 1156 .kick_tx_queue = rt2800pci_kick_tx_queue,
1141 .kill_tx_queue = rt2800pci_kill_tx_queue, 1157 .kill_tx_queue = rt2800pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6b809ab42c61..e3f3a97db807 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -437,7 +437,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
437 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 437 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
438 txdesc->key_idx : 0xff); 438 txdesc->key_idx : 0xff);
439 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 439 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
440 skb->len - txdesc->l2pad); 440 txdesc->length);
441 rt2x00_set_field32(&word, TXWI_W1_PACKETID, 441 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
442 skbdesc->entry->queue->qid + 1); 442 skbdesc->entry->queue->qid + 1);
443 rt2x00_desc_write(txwi, 1, word); 443 rt2x00_desc_write(txwi, 1, word);
@@ -645,10 +645,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
645 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) + 645 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
646 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2; 646 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
647 647
648 rxdesc->noise =
649 (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
650 rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
651
652 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT); 648 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
653 649
654 /* 650 /*
@@ -806,6 +802,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
806 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 802 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
807 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 803 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
808 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 804 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
805 /* Allwin */
806 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
807 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
808 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
809 /* Amit */ 809 /* Amit */
810 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 810 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
811 /* Askey */ 811 /* Askey */
@@ -848,6 +848,11 @@ static struct usb_device_id rt2800usb_device_table[] = {
848 /* Hawking */ 848 /* Hawking */
849 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, 849 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
850 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 850 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
851 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
852 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
854 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
855 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
851 /* Linksys */ 856 /* Linksys */
852 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 857 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 858 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -907,6 +912,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
907 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 912 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
908 /* AirTies */ 913 /* AirTies */
909 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, 914 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
915 /* Allwin */
916 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
917 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
918 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
910 /* ASUS */ 919 /* ASUS */
911 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 920 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
912 /* AzureWave */ 921 /* AzureWave */
@@ -991,6 +1000,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
991 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, 1000 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
992#endif 1001#endif
993#ifdef CONFIG_RT2800USB_RT35XX 1002#ifdef CONFIG_RT2800USB_RT35XX
1003 /* Allwin */
1004 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
994 /* Askey */ 1005 /* Askey */
995 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, 1006 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
996 /* Cisco */ 1007 /* Cisco */
@@ -1012,16 +1023,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1012#ifdef CONFIG_RT2800USB_UNKNOWN 1023#ifdef CONFIG_RT2800USB_UNKNOWN
1013 /* 1024 /*
1014 * Unclear what kind of devices these are (they aren't supported by the 1025 * Unclear what kind of devices these are (they aren't supported by the
1015 * vendor driver). 1026 * vendor linux driver).
1016 */ 1027 */
1017 /* Allwin */
1018 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
1019 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
1020 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
1021 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
1022 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
1023 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
1024 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
1025 /* Amigo */ 1028 /* Amigo */
1026 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 1029 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
1027 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, 1030 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1033,6 +1036,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1033 /* AzureWave */ 1036 /* AzureWave */
1034 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 1037 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
1035 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 1038 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
1039 { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) },
1036 /* Belkin */ 1040 /* Belkin */
1037 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1041 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
1038 /* Buffalo */ 1042 /* Buffalo */
@@ -1051,15 +1055,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
1051 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1055 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
1052 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 1056 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
1053 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 1057 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
1058 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
1054 /* Encore */ 1059 /* Encore */
1055 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 1060 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
1056 /* Gemtek */ 1061 /* Gemtek */
1057 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1062 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
1058 /* Gigabyte */ 1063 /* Gigabyte */
1059 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, 1064 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
1060 /* Hawking */
1061 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
1062 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
1063 /* LevelOne */ 1065 /* LevelOne */
1064 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 1066 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
1065 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 1067 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1070,11 +1072,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
1070 /* Motorola */ 1072 /* Motorola */
1071 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 1073 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
1072 /* Ovislink */ 1074 /* Ovislink */
1075 { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
1073 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 1076 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
1074 /* Pegatron */ 1077 /* Pegatron */
1075 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, 1078 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
1076 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 1079 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
1077 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1080 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
1081 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
1078 /* Planex */ 1082 /* Planex */
1079 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 1083 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
1080 /* Qcom */ 1084 /* Qcom */
@@ -1083,6 +1087,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1083 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 1087 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
1084 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1088 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
1085 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1089 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
1090 { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) },
1086 /* Sweex */ 1091 /* Sweex */
1087 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 1092 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
1088 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 1093 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 4de505b98331..4f9b666f7a7f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -549,7 +549,8 @@ struct rt2x00lib_ops {
549 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 549 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
550 struct sk_buff *skb, 550 struct sk_buff *skb,
551 struct txentry_desc *txdesc); 551 struct txentry_desc *txdesc);
552 int (*write_tx_data) (struct queue_entry *entry); 552 int (*write_tx_data) (struct queue_entry *entry,
553 struct txentry_desc *txdesc);
553 void (*write_beacon) (struct queue_entry *entry); 554 void (*write_beacon) (struct queue_entry *entry);
554 int (*get_tx_data_len) (struct queue_entry *entry); 555 int (*get_tx_data_len) (struct queue_entry *entry);
555 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 556 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index eda73ba735a6..3ae468c4d760 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -435,7 +435,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
435 rx_status->mactime = rxdesc.timestamp; 435 rx_status->mactime = rxdesc.timestamp;
436 rx_status->rate_idx = rate_idx; 436 rx_status->rate_idx = rate_idx;
437 rx_status->signal = rxdesc.rssi; 437 rx_status->signal = rxdesc.rssi;
438 rx_status->noise = rxdesc.noise;
439 rx_status->flag = rxdesc.flags; 438 rx_status->flag = rxdesc.flags;
440 rx_status->antenna = rt2x00dev->link.ant.active.rx; 439 rx_status->antenna = rt2x00dev->link.ant.active.rx;
441 440
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cf3f1c0c4382..4b941e9c794e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -63,7 +63,8 @@ EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
63/* 63/*
64 * TX data handlers. 64 * TX data handlers.
65 */ 65 */
66int rt2x00pci_write_tx_data(struct queue_entry *entry) 66int rt2x00pci_write_tx_data(struct queue_entry *entry,
67 struct txentry_desc *txdesc)
67{ 68{
68 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 69 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
69 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 70 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 8149ff68410a..51bcef3839ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -92,7 +92,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
92 * This function will initialize the DMA and skb descriptor 92 * This function will initialize the DMA and skb descriptor
93 * to prepare the entry for the actual TX operation. 93 * to prepare the entry for the actual TX operation.
94 */ 94 */
95int rt2x00pci_write_tx_data(struct queue_entry *entry); 95int rt2x00pci_write_tx_data(struct queue_entry *entry,
96 struct txentry_desc *txdesc);
96 97
97/** 98/**
98 * struct queue_entry_priv_pci: Per entry PCI specific information 99 * struct queue_entry_priv_pci: Per entry PCI specific information
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a0bd36fc4d2e..e22029fcf411 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -334,12 +334,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
334 txdesc->aifs = entry->queue->aifs; 334 txdesc->aifs = entry->queue->aifs;
335 335
336 /* 336 /*
337 * Header and alignment information. 337 * Header and frame information.
338 */ 338 */
339 txdesc->length = entry->skb->len;
339 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 340 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
340 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
341 (entry->skb->len > txdesc->header_length))
342 txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
343 341
344 /* 342 /*
345 * Check whether this frame is to be acked. 343 * Check whether this frame is to be acked.
@@ -526,7 +524,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
526 * call failed. Since we always return NETDEV_TX_OK to mac80211, 524 * call failed. Since we always return NETDEV_TX_OK to mac80211,
527 * this frame will simply be dropped. 525 * this frame will simply be dropped.
528 */ 526 */
529 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { 527 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
528 &txdesc))) {
530 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 529 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
531 entry->skb = NULL; 530 entry->skb = NULL;
532 return -EIO; 531 return -EIO;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index c1e482bb37b3..94a48c174d67 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -183,7 +183,6 @@ enum rxdone_entry_desc_flags {
183 * @timestamp: RX Timestamp 183 * @timestamp: RX Timestamp
184 * @signal: Signal of the received frame. 184 * @signal: Signal of the received frame.
185 * @rssi: RSSI of the received frame. 185 * @rssi: RSSI of the received frame.
186 * @noise: Measured noise during frame reception.
187 * @size: Data size of the received frame. 186 * @size: Data size of the received frame.
188 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). 187 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
189 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 188 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -197,7 +196,6 @@ struct rxdone_entry_desc {
197 u64 timestamp; 196 u64 timestamp;
198 int signal; 197 int signal;
199 int rssi; 198 int rssi;
200 int noise;
201 int size; 199 int size;
202 int flags; 200 int flags;
203 int dev_flags; 201 int dev_flags;
@@ -287,8 +285,8 @@ enum txentry_desc_flags {
287 * 285 *
288 * @flags: Descriptor flags (See &enum queue_entry_flags). 286 * @flags: Descriptor flags (See &enum queue_entry_flags).
289 * @queue: Queue identification (See &enum data_queue_qid). 287 * @queue: Queue identification (See &enum data_queue_qid).
288 * @length: Length of the entire frame.
290 * @header_length: Length of 802.11 header. 289 * @header_length: Length of 802.11 header.
291 * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary.
292 * @length_high: PLCP length high word. 290 * @length_high: PLCP length high word.
293 * @length_low: PLCP length low word. 291 * @length_low: PLCP length low word.
294 * @signal: PLCP signal. 292 * @signal: PLCP signal.
@@ -313,8 +311,8 @@ struct txentry_desc {
313 311
314 enum data_queue_qid queue; 312 enum data_queue_qid queue;
315 313
314 u16 length;
316 u16 header_length; 315 u16 header_length;
317 u16 l2pad;
318 316
319 u16 length_high; 317 u16 length_high;
320 u16 length_low; 318 u16 length_low;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f9a7f8b17411..da111c0c2928 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -216,7 +216,8 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
216 rt2x00lib_txdone(entry, &txdesc); 216 rt2x00lib_txdone(entry, &txdesc);
217} 217}
218 218
219int rt2x00usb_write_tx_data(struct queue_entry *entry) 219int rt2x00usb_write_tx_data(struct queue_entry *entry,
220 struct txentry_desc *txdesc)
220{ 221{
221 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 222 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
222 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 223 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3da6841b5d42..621d0f829251 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -376,7 +376,8 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
376 * This function will initialize the URB and skb descriptor 376 * This function will initialize the URB and skb descriptor
377 * to prepare the entry for the actual TX operation. 377 * to prepare the entry for the actual TX operation.
378 */ 378 */
379int rt2x00usb_write_tx_data(struct queue_entry *entry); 379int rt2x00usb_write_tx_data(struct queue_entry *entry,
380 struct txentry_desc *txdesc);
380 381
381/** 382/**
382 * struct queue_entry_priv_usb: Per entry USB specific information 383 * struct queue_entry_priv_usb: Per entry USB specific information
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b9885981f3a8..26ee7911fba9 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1809,7 +1809,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1809 1809
1810 if (skbdesc->desc_len > TXINFO_SIZE) { 1810 if (skbdesc->desc_len > TXINFO_SIZE) {
1811 rt2x00_desc_read(txd, 11, &word); 1811 rt2x00_desc_read(txd, 11, &word);
1812 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len); 1812 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
1813 txdesc->length);
1813 rt2x00_desc_write(txd, 11, word); 1814 rt2x00_desc_write(txd, 11, word);
1814 } 1815 }
1815 1816
@@ -1832,7 +1833,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1832 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, 1833 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1833 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); 1834 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1834 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); 1835 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1835 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1836 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1836 rt2x00_set_field32(&word, TXD_W0_BURST, 1837 rt2x00_set_field32(&word, TXD_W0_BURST,
1837 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1838 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1838 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); 1839 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 576ea9dd2824..39b3c6d04af4 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1495,7 +1495,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1495 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, 1495 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1496 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); 1496 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1497 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); 1497 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1498 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1498 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1499 rt2x00_set_field32(&word, TXD_W0_BURST2, 1499 rt2x00_set_field32(&word, TXD_W0_BURST2,
1500 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1500 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1501 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); 1501 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
new file mode 100644
index 000000000000..17d80fe556de
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -0,0 +1,88 @@
1#
2# RTL818X Wireless LAN device configuration
3#
4config RTL8180
5 tristate "Realtek 8180/8185 PCI support"
6 depends on MAC80211 && PCI && EXPERIMENTAL
7 select EEPROM_93CX6
8 ---help---
9 This is a driver for RTL8180 and RTL8185 based cards.
10 These are PCI based chips found in cards such as:
11
12 (RTL8185 802.11g)
13 A-Link WL54PC
14
15 (RTL8180 802.11b)
16 Belkin F5D6020 v3
17 Belkin F5D6020 v3
18 Dlink DWL-610
19 Dlink DWL-510
20 Netgear MA521
21 Level-One WPC-0101
22 Acer Aspire 1357 LMi
23 VCTnet PC-11B1
24 Ovislink AirLive WL-1120PCM
25 Mentor WL-PCI
26 Linksys WPC11 v4
27 TrendNET TEW-288PI
28 D-Link DWL-520 Rev D
29 Repotec RP-WP7126
30 TP-Link TL-WN250/251
31 Zonet ZEW1000
32 Longshine LCS-8031-R
33 HomeLine HLW-PCC200
34 GigaFast WF721-AEX
35 Planet WL-3553
36 Encore ENLWI-PCI1-NT
37 TrendNET TEW-266PC
38 Gigabyte GN-WLMR101
39 Siemens-fujitsu Amilo D1840W
40 Edimax EW-7126
41 PheeNet WL-11PCIR
42 Tonze PC-2100T
43 Planet WL-8303
44 Dlink DWL-650 v M1
45 Edimax EW-7106
46 Q-Tec 770WC
47 Topcom Skyr@cer 4011b
48 Roper FreeLan 802.11b (edition 2004)
49 Wistron Neweb Corp CB-200B
50 Pentagram HorNET
51 QTec 775WC
52 TwinMOS Booming B Series
53 Micronet SP906BB
54 Sweex LC700010
55 Surecom EP-9428
56 Safecom SWLCR-1100
57
58 Thanks to Realtek for their support!
59
60config RTL8187
61 tristate "Realtek 8187 and 8187B USB support"
62 depends on MAC80211 && USB
63 select EEPROM_93CX6
64 ---help---
65 This is a driver for RTL8187 and RTL8187B based cards.
66 These are USB based chips found in devices such as:
67
68 Netgear WG111v2
69 Level 1 WNC-0301USB
70 Micronet SP907GK V5
71 Encore ENUWI-G2
72 Trendnet TEW-424UB
73 ASUS P5B Deluxe/P5K Premium motherboards
74 Toshiba Satellite Pro series of laptops
75 Asus Wireless Link
76 Linksys WUSB54GC-EU v2
77 (v1 = rt73usb; v3 is rt2070-based,
78 use staging/rt3070 or try rt2800usb)
79
80 Thanks to Realtek for their support!
81
82# If possible, automatically enable LEDs for RTL8187.
83
84config RTL8187_LEDS
85 bool
86 depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
87 default y
88
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 6b46329b732f..21307f2412b8 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
188 info->flags |= IEEE80211_TX_STAT_ACK; 188 info->flags |= IEEE80211_TX_STAT_ACK;
189 189
190 info->status.rates[0].count = (flags & 0xFF) + 1; 190 info->status.rates[0].count = (flags & 0xFF) + 1;
191 info->status.rates[1].idx = -1;
191 192
192 ieee80211_tx_status_irqsafe(dev, skb); 193 ieee80211_tx_status_irqsafe(dev, skb);
193 if (ring->entries - skb_queue_len(&ring->queue) == 2) 194 if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -297,7 +298,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
297 entry->flags = cpu_to_le32(tx_flags); 298 entry->flags = cpu_to_le32(tx_flags);
298 __skb_queue_tail(&ring->queue, skb); 299 __skb_queue_tail(&ring->queue, skb);
299 if (ring->entries - skb_queue_len(&ring->queue) < 2) 300 if (ring->entries - skb_queue_len(&ring->queue) < 2)
300 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb)); 301 ieee80211_stop_queue(dev, prio);
301 spin_unlock_irqrestore(&priv->lock, flags); 302 spin_unlock_irqrestore(&priv->lock, flags);
302 303
303 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 304 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -827,6 +828,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
827 const char *chip_name, *rf_name = NULL; 828 const char *chip_name, *rf_name = NULL;
828 u32 reg; 829 u32 reg;
829 u16 eeprom_val; 830 u16 eeprom_val;
831 u8 mac_addr[ETH_ALEN];
830 832
831 err = pci_enable_device(pdev); 833 err = pci_enable_device(pdev);
832 if (err) { 834 if (err) {
@@ -987,12 +989,13 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
987 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam); 989 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
988 } 990 }
989 991
990 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3); 992 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
991 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 993 if (!is_valid_ether_addr(mac_addr)) {
992 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using" 994 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
993 " randomly generated MAC addr\n", pci_name(pdev)); 995 " randomly generated MAC addr\n", pci_name(pdev));
994 random_ether_addr(dev->wiphy->perm_addr); 996 random_ether_addr(mac_addr);
995 } 997 }
998 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
996 999
997 /* CCK TX power */ 1000 /* CCK TX power */
998 for (i = 0; i < 14; i += 2) { 1001 for (i = 0; i < 14; i += 2) {
@@ -1024,7 +1027,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1024 } 1027 }
1025 1028
1026 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n", 1029 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
1027 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, 1030 wiphy_name(dev->wiphy), mac_addr,
1028 chip_name, priv->rf->name); 1031 chip_name, priv->rf->name);
1029 1032
1030 return 0; 1033 return 0;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 738921fda027..891b8490e349 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1333,6 +1333,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1333 u16 txpwr, reg; 1333 u16 txpwr, reg;
1334 u16 product_id = le16_to_cpu(udev->descriptor.idProduct); 1334 u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
1335 int err, i; 1335 int err, i;
1336 u8 mac_addr[ETH_ALEN];
1336 1337
1337 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); 1338 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
1338 if (!dev) { 1339 if (!dev) {
@@ -1390,12 +1391,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1390 udelay(10); 1391 udelay(10);
1391 1392
1392 eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR, 1393 eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
1393 (__le16 __force *)dev->wiphy->perm_addr, 3); 1394 (__le16 __force *)mac_addr, 3);
1394 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 1395 if (!is_valid_ether_addr(mac_addr)) {
1395 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly " 1396 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
1396 "generated MAC address\n"); 1397 "generated MAC address\n");
1397 random_ether_addr(dev->wiphy->perm_addr); 1398 random_ether_addr(mac_addr);
1398 } 1399 }
1400 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
1399 1401
1400 channel = priv->channels; 1402 channel = priv->channels;
1401 for (i = 0; i < 3; i++) { 1403 for (i = 0; i < 3; i++) {
@@ -1526,7 +1528,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1526 skb_queue_head_init(&priv->b_tx_status.queue); 1528 skb_queue_head_init(&priv->b_tx_status.queue);
1527 1529
1528 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", 1530 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
1529 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, 1531 wiphy_name(dev->wiphy), mac_addr,
1530 chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); 1532 chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
1531 1533
1532#ifdef CONFIG_RTL8187_LEDS 1534#ifdef CONFIG_RTL8187_LEDS
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 4d479708158d..00b24282fc73 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -857,6 +857,7 @@ out:
857} 857}
858 858
859static int wl1251_op_hw_scan(struct ieee80211_hw *hw, 859static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
860 struct ieee80211_vif *vif,
860 struct cfg80211_scan_request *req) 861 struct cfg80211_scan_request *req)
861{ 862{
862 struct wl1251 *wl = hw->priv; 863 struct wl1251 *wl = hw->priv;
@@ -1196,6 +1197,66 @@ static const struct ieee80211_ops wl1251_ops = {
1196 .conf_tx = wl1251_op_conf_tx, 1197 .conf_tx = wl1251_op_conf_tx,
1197}; 1198};
1198 1199
1200static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
1201{
1202 unsigned long timeout;
1203
1204 wl1251_reg_write32(wl, EE_ADDR, offset);
1205 wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
1206
1207 /* EE_CTL_READ clears when data is ready */
1208 timeout = jiffies + msecs_to_jiffies(100);
1209 while (1) {
1210 if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
1211 break;
1212
1213 if (time_after(jiffies, timeout))
1214 return -ETIMEDOUT;
1215
1216 msleep(1);
1217 }
1218
1219 *data = wl1251_reg_read32(wl, EE_DATA);
1220 return 0;
1221}
1222
1223static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
1224 u8 *data, size_t len)
1225{
1226 size_t i;
1227 int ret;
1228
1229 wl1251_reg_write32(wl, EE_START, 0);
1230
1231 for (i = 0; i < len; i++) {
1232 ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
1233 if (ret < 0)
1234 return ret;
1235 }
1236
1237 return 0;
1238}
1239
1240static int wl1251_read_eeprom_mac(struct wl1251 *wl)
1241{
1242 u8 mac[ETH_ALEN];
1243 int i, ret;
1244
1245 wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
1246
1247 ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
1248 if (ret < 0) {
1249 wl1251_warning("failed to read MAC address from EEPROM");
1250 return ret;
1251 }
1252
1253 /* MAC is stored in reverse order */
1254 for (i = 0; i < ETH_ALEN; i++)
1255 wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
1256
1257 return 0;
1258}
1259
1199static int wl1251_register_hw(struct wl1251 *wl) 1260static int wl1251_register_hw(struct wl1251 *wl)
1200{ 1261{
1201 int ret; 1262 int ret;
@@ -1231,7 +1292,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1231 wl->hw->channel_change_time = 10000; 1292 wl->hw->channel_change_time = 10000;
1232 1293
1233 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1294 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1234 IEEE80211_HW_NOISE_DBM |
1235 IEEE80211_HW_SUPPORTS_PS | 1295 IEEE80211_HW_SUPPORTS_PS |
1236 IEEE80211_HW_BEACON_FILTER | 1296 IEEE80211_HW_BEACON_FILTER |
1237 IEEE80211_HW_SUPPORTS_UAPSD; 1297 IEEE80211_HW_SUPPORTS_UAPSD;
@@ -1242,6 +1302,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1242 1302
1243 wl->hw->queues = 4; 1303 wl->hw->queues = 4;
1244 1304
1305 if (wl->use_eeprom)
1306 wl1251_read_eeprom_mac(wl);
1307
1245 ret = wl1251_register_hw(wl); 1308 ret = wl1251_register_hw(wl);
1246 if (ret) 1309 if (ret)
1247 goto out; 1310 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 0ca3b4326056..d16edd9bf06c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -46,7 +46,14 @@
46#define SOR_CFG (REGISTERS_BASE + 0x0800) 46#define SOR_CFG (REGISTERS_BASE + 0x0800)
47#define ECPU_CTRL (REGISTERS_BASE + 0x0804) 47#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
48#define HI_CFG (REGISTERS_BASE + 0x0808) 48#define HI_CFG (REGISTERS_BASE + 0x0808)
49
50/* EEPROM registers */
49#define EE_START (REGISTERS_BASE + 0x080C) 51#define EE_START (REGISTERS_BASE + 0x080C)
52#define EE_CTL (REGISTERS_BASE + 0x2000)
53#define EE_DATA (REGISTERS_BASE + 0x2004)
54#define EE_ADDR (REGISTERS_BASE + 0x2008)
55
56#define EE_CTL_READ 2
50 57
51#define CHIP_ID_B (REGISTERS_BASE + 0x5674) 58#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
52 59
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 6f229e0990f4..af5c67b4da95 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,12 +74,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
74 74
75 status->signal = desc->rssi; 75 status->signal = desc->rssi;
76 76
77 /*
78 * FIXME: guessing that snr needs to be divided by two, otherwise
79 * the values don't make any sense
80 */
81 status->noise = desc->rssi - desc->snr / 2;
82
83 status->freq = ieee80211_channel_to_frequency(desc->channel); 77 status->freq = ieee80211_channel_to_frequency(desc->channel);
84 78
85 status->flag |= RX_FLAG_TSFT; 79 status->flag |= RX_FLAG_TSFT;
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 2051ef06e9ec..d234285c2c81 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -23,6 +23,9 @@
23#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
24#include <linux/mmc/sdio_func.h> 24#include <linux/mmc/sdio_func.h>
25#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
26#include <linux/platform_device.h>
27#include <linux/spi/wl12xx.h>
28#include <linux/irq.h>
26 29
27#include "wl1251.h" 30#include "wl1251.h"
28 31
@@ -34,6 +37,8 @@
34#define SDIO_DEVICE_ID_TI_WL1251 0x9066 37#define SDIO_DEVICE_ID_TI_WL1251 0x9066
35#endif 38#endif
36 39
40static struct wl12xx_platform_data *wl12xx_board_data;
41
37static struct sdio_func *wl_to_func(struct wl1251 *wl) 42static struct sdio_func *wl_to_func(struct wl1251 *wl)
38{ 43{
39 return wl->if_priv; 44 return wl->if_priv;
@@ -130,18 +135,60 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
130 sdio_release_host(func); 135 sdio_release_host(func);
131} 136}
132 137
138/* Interrupts when using dedicated WLAN_IRQ pin */
139static irqreturn_t wl1251_line_irq(int irq, void *cookie)
140{
141 struct wl1251 *wl = cookie;
142
143 ieee80211_queue_work(wl->hw, &wl->irq_work);
144
145 return IRQ_HANDLED;
146}
147
148static void wl1251_enable_line_irq(struct wl1251 *wl)
149{
150 return enable_irq(wl->irq);
151}
152
153static void wl1251_disable_line_irq(struct wl1251 *wl)
154{
155 return disable_irq(wl->irq);
156}
157
133static void wl1251_sdio_set_power(bool enable) 158static void wl1251_sdio_set_power(bool enable)
134{ 159{
135} 160}
136 161
137static const struct wl1251_if_operations wl1251_sdio_ops = { 162static struct wl1251_if_operations wl1251_sdio_ops = {
138 .read = wl1251_sdio_read, 163 .read = wl1251_sdio_read,
139 .write = wl1251_sdio_write, 164 .write = wl1251_sdio_write,
140 .write_elp = wl1251_sdio_write_elp, 165 .write_elp = wl1251_sdio_write_elp,
141 .read_elp = wl1251_sdio_read_elp, 166 .read_elp = wl1251_sdio_read_elp,
142 .reset = wl1251_sdio_reset, 167 .reset = wl1251_sdio_reset,
143 .enable_irq = wl1251_sdio_enable_irq, 168};
144 .disable_irq = wl1251_sdio_disable_irq, 169
170static int wl1251_platform_probe(struct platform_device *pdev)
171{
172 if (pdev->id != -1) {
173 wl1251_error("can only handle single device");
174 return -ENODEV;
175 }
176
177 wl12xx_board_data = pdev->dev.platform_data;
178 return 0;
179}
180
181/*
182 * Dummy platform_driver for passing platform_data to this driver,
183 * until we have a way to pass this through SDIO subsystem or
184 * some other way.
185 */
186static struct platform_driver wl1251_platform_driver = {
187 .driver = {
188 .name = "wl1251_data",
189 .owner = THIS_MODULE,
190 },
191 .probe = wl1251_platform_probe,
145}; 192};
146 193
147static int wl1251_sdio_probe(struct sdio_func *func, 194static int wl1251_sdio_probe(struct sdio_func *func,
@@ -163,20 +210,50 @@ static int wl1251_sdio_probe(struct sdio_func *func,
163 goto release; 210 goto release;
164 211
165 sdio_set_block_size(func, 512); 212 sdio_set_block_size(func, 512);
213 sdio_release_host(func);
166 214
167 SET_IEEE80211_DEV(hw, &func->dev); 215 SET_IEEE80211_DEV(hw, &func->dev);
168 wl->if_priv = func; 216 wl->if_priv = func;
169 wl->if_ops = &wl1251_sdio_ops; 217 wl->if_ops = &wl1251_sdio_ops;
170 wl->set_power = wl1251_sdio_set_power; 218 wl->set_power = wl1251_sdio_set_power;
171 219
172 sdio_release_host(func); 220 if (wl12xx_board_data != NULL) {
221 wl->set_power = wl12xx_board_data->set_power;
222 wl->irq = wl12xx_board_data->irq;
223 wl->use_eeprom = wl12xx_board_data->use_eeprom;
224 }
225
226 if (wl->irq) {
227 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
228 if (ret < 0) {
229 wl1251_error("request_irq() failed: %d", ret);
230 goto disable;
231 }
232
233 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
234 disable_irq(wl->irq);
235
236 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
237 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
238
239 wl1251_info("using dedicated interrupt line");
240 } else {
241 wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
242 wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
243
244 wl1251_info("using SDIO interrupt");
245 }
246
173 ret = wl1251_init_ieee80211(wl); 247 ret = wl1251_init_ieee80211(wl);
174 if (ret) 248 if (ret)
175 goto disable; 249 goto out_free_irq;
176 250
177 sdio_set_drvdata(func, wl); 251 sdio_set_drvdata(func, wl);
178 return ret; 252 return ret;
179 253
254out_free_irq:
255 if (wl->irq)
256 free_irq(wl->irq, wl);
180disable: 257disable:
181 sdio_claim_host(func); 258 sdio_claim_host(func);
182 sdio_disable_func(func); 259 sdio_disable_func(func);
@@ -189,6 +266,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
189{ 266{
190 struct wl1251 *wl = sdio_get_drvdata(func); 267 struct wl1251 *wl = sdio_get_drvdata(func);
191 268
269 if (wl->irq)
270 free_irq(wl->irq, wl);
192 wl1251_free_hw(wl); 271 wl1251_free_hw(wl);
193 272
194 sdio_claim_host(func); 273 sdio_claim_host(func);
@@ -208,6 +287,12 @@ static int __init wl1251_sdio_init(void)
208{ 287{
209 int err; 288 int err;
210 289
290 err = platform_driver_register(&wl1251_platform_driver);
291 if (err) {
292 wl1251_error("failed to register platform driver: %d", err);
293 return err;
294 }
295
211 err = sdio_register_driver(&wl1251_sdio_driver); 296 err = sdio_register_driver(&wl1251_sdio_driver);
212 if (err) 297 if (err)
213 wl1251_error("failed to register sdio driver: %d", err); 298 wl1251_error("failed to register sdio driver: %d", err);
@@ -217,6 +302,7 @@ static int __init wl1251_sdio_init(void)
217static void __exit wl1251_sdio_exit(void) 302static void __exit wl1251_sdio_exit(void)
218{ 303{
219 sdio_unregister_driver(&wl1251_sdio_driver); 304 sdio_unregister_driver(&wl1251_sdio_driver);
305 platform_driver_unregister(&wl1251_platform_driver);
220 wl1251_notice("unloaded"); 306 wl1251_notice("unloaded");
221} 307}
222 308
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 2ad086efe06e..e19e2f8f1e52 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -590,7 +590,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
590 590
591 /* BT-WLAN coext parameters */ 591 /* BT-WLAN coext parameters */
592 for (i = 0; i < CONF_SG_PARAMS_MAX; i++) 592 for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
593 param->params[i] = c->params[i]; 593 param->params[i] = cpu_to_le32(c->params[i]);
594 param->param_idx = CONF_SG_PARAMS_ALL; 594 param->param_idx = CONF_SG_PARAMS_ALL;
595 595
596 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 596 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8087dc17f29d..acb1d9e6b7d2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -351,7 +351,7 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
351static int wl1271_boot_run_firmware(struct wl1271 *wl) 351static int wl1271_boot_run_firmware(struct wl1271 *wl)
352{ 352{
353 int loop, ret; 353 int loop, ret;
354 u32 chip_id, interrupt; 354 u32 chip_id, intr;
355 355
356 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 356 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
357 357
@@ -368,15 +368,15 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
368 loop = 0; 368 loop = 0;
369 while (loop++ < INIT_LOOP) { 369 while (loop++ < INIT_LOOP) {
370 udelay(INIT_LOOP_DELAY); 370 udelay(INIT_LOOP_DELAY);
371 interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 371 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
372 372
373 if (interrupt == 0xffffffff) { 373 if (intr == 0xffffffff) {
374 wl1271_error("error reading hardware complete " 374 wl1271_error("error reading hardware complete "
375 "init indication"); 375 "init indication");
376 return -EIO; 376 return -EIO;
377 } 377 }
378 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 378 /* check that ACX_INTR_INIT_COMPLETE is enabled */
379 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 379 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
380 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, 380 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
381 WL1271_ACX_INTR_INIT_COMPLETE); 381 WL1271_ACX_INTR_INIT_COMPLETE);
382 break; 382 break;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 6b5ba8ec94c9..62c11af1d8e2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -37,7 +37,7 @@
37#include "wl1271_cmd.h" 37#include "wl1271_cmd.h"
38#include "wl1271_event.h" 38#include "wl1271_event.h"
39 39
40#define WL1271_CMD_POLL_COUNT 5 40#define WL1271_CMD_FAST_POLL_COUNT 50
41 41
42/* 42/*
43 * send command to firmware 43 * send command to firmware
@@ -77,11 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
77 goto out; 77 goto out;
78 } 78 }
79 79
80 udelay(10);
81 poll_count++; 80 poll_count++;
82 if (poll_count == WL1271_CMD_POLL_COUNT) 81 if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
83 wl1271_info("cmd polling took over %d cycles", 82 udelay(10);
84 poll_count); 83 else
84 msleep(1);
85 85
86 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 86 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
87 } 87 }
@@ -318,7 +318,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
318 join->rx_config_options = cpu_to_le32(wl->rx_config); 318 join->rx_config_options = cpu_to_le32(wl->rx_config);
319 join->rx_filter_options = cpu_to_le32(wl->rx_filter); 319 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
320 join->bss_type = bss_type; 320 join->bss_type = bss_type;
321 join->basic_rate_set = wl->basic_rate_set; 321 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
322 322
323 if (wl->band == IEEE80211_BAND_5GHZ) 323 if (wl->band == IEEE80211_BAND_5GHZ)
324 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 324 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -615,7 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
615 params->params.scan_options = cpu_to_le16(scan_options); 615 params->params.scan_options = cpu_to_le16(scan_options);
616 616
617 params->params.num_probe_requests = probe_requests; 617 params->params.num_probe_requests = probe_requests;
618 params->params.tx_rate = rate; 618 params->params.tx_rate = cpu_to_le32(rate);
619 params->params.tid_trigger = 0; 619 params->params.tid_trigger = 0;
620 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 620 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
621 621
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index c44307c4bcf8..d046d044b5bd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -401,7 +401,7 @@ enum {
401}; 401};
402 402
403struct conf_sg_settings { 403struct conf_sg_settings {
404 __le32 params[CONF_SG_PARAMS_MAX]; 404 u32 params[CONF_SG_PARAMS_MAX];
405 u8 state; 405 u8 state;
406}; 406};
407 407
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 814f300c3f17..62e544041d0d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -1118,14 +1118,13 @@ static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1118 } 1118 }
1119} 1119}
1120 1120
1121static int wl1271_join_channel(struct wl1271 *wl, int channel) 1121static int wl1271_dummy_join(struct wl1271 *wl)
1122{ 1122{
1123 int ret = 0; 1123 int ret = 0;
1124 /* we need to use a dummy BSSID for now */ 1124 /* we need to use a dummy BSSID for now */
1125 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde, 1125 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1126 0xad, 0xbe, 0xef }; 1126 0xad, 0xbe, 0xef };
1127 1127
1128 wl->channel = channel;
1129 memcpy(wl->bssid, dummy_bssid, ETH_ALEN); 1128 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1130 1129
1131 /* pass through frames from all BSS */ 1130 /* pass through frames from all BSS */
@@ -1141,7 +1140,47 @@ out:
1141 return ret; 1140 return ret;
1142} 1141}
1143 1142
1144static int wl1271_unjoin_channel(struct wl1271 *wl) 1143static int wl1271_join(struct wl1271 *wl)
1144{
1145 int ret;
1146
1147 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1148 if (ret < 0)
1149 goto out;
1150
1151 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1152
1153 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1154 goto out;
1155
1156 /*
1157 * The join command disable the keep-alive mode, shut down its process,
1158 * and also clear the template config, so we need to reset it all after
1159 * the join. The acx_aid starts the keep-alive process, and the order
1160 * of the commands below is relevant.
1161 */
1162 ret = wl1271_acx_keep_alive_mode(wl, true);
1163 if (ret < 0)
1164 goto out;
1165
1166 ret = wl1271_acx_aid(wl, wl->aid);
1167 if (ret < 0)
1168 goto out;
1169
1170 ret = wl1271_cmd_build_klv_null_data(wl);
1171 if (ret < 0)
1172 goto out;
1173
1174 ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1175 ACX_KEEP_ALIVE_TPL_VALID);
1176 if (ret < 0)
1177 goto out;
1178
1179out:
1180 return ret;
1181}
1182
1183static int wl1271_unjoin(struct wl1271 *wl)
1145{ 1184{
1146 int ret; 1185 int ret;
1147 1186
@@ -1231,7 +1270,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1231 "failed %d", ret); 1270 "failed %d", ret);
1232 1271
1233 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { 1272 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1234 ret = wl1271_cmd_join(wl, wl->set_bss_type); 1273 ret = wl1271_join(wl);
1235 if (ret < 0) 1274 if (ret < 0)
1236 wl1271_warning("cmd join to update channel " 1275 wl1271_warning("cmd join to update channel "
1237 "failed %d", ret); 1276 "failed %d", ret);
@@ -1241,9 +1280,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1241 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1280 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1242 if (conf->flags & IEEE80211_CONF_IDLE && 1281 if (conf->flags & IEEE80211_CONF_IDLE &&
1243 test_bit(WL1271_FLAG_JOINED, &wl->flags)) 1282 test_bit(WL1271_FLAG_JOINED, &wl->flags))
1244 wl1271_unjoin_channel(wl); 1283 wl1271_unjoin(wl);
1245 else if (!(conf->flags & IEEE80211_CONF_IDLE)) 1284 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1246 wl1271_join_channel(wl, channel); 1285 wl1271_dummy_join(wl);
1247 1286
1248 if (conf->flags & IEEE80211_CONF_IDLE) { 1287 if (conf->flags & IEEE80211_CONF_IDLE) {
1249 wl->rate_set = wl1271_min_rate_get(wl); 1288 wl->rate_set = wl1271_min_rate_get(wl);
@@ -1311,7 +1350,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
1311 struct wl1271_filter_params *fp; 1350 struct wl1271_filter_params *fp;
1312 struct netdev_hw_addr *ha; 1351 struct netdev_hw_addr *ha;
1313 struct wl1271 *wl = hw->priv; 1352 struct wl1271 *wl = hw->priv;
1314 int i;
1315 1353
1316 if (unlikely(wl->state == WL1271_STATE_OFF)) 1354 if (unlikely(wl->state == WL1271_STATE_OFF))
1317 return 0; 1355 return 0;
@@ -1520,6 +1558,7 @@ out:
1520} 1558}
1521 1559
1522static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 1560static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1561 struct ieee80211_vif *vif,
1523 struct cfg80211_scan_request *req) 1562 struct cfg80211_scan_request *req)
1524{ 1563{
1525 struct wl1271 *wl = hw->priv; 1564 struct wl1271 *wl = hw->priv;
@@ -1608,7 +1647,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1608 enum wl1271_cmd_ps_mode mode; 1647 enum wl1271_cmd_ps_mode mode;
1609 struct wl1271 *wl = hw->priv; 1648 struct wl1271 *wl = hw->priv;
1610 bool do_join = false; 1649 bool do_join = false;
1611 bool do_keepalive = false;
1612 int ret; 1650 int ret;
1613 1651
1614 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 1652 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1703,6 +1741,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1703 if (ret < 0) 1741 if (ret < 0)
1704 goto out_sleep; 1742 goto out_sleep;
1705 1743
1744 ret = wl1271_build_qos_null_data(wl);
1745 if (ret < 0)
1746 goto out_sleep;
1747
1706 /* filter out all packets not from this BSSID */ 1748 /* filter out all packets not from this BSSID */
1707 wl1271_configure_filters(wl, 0); 1749 wl1271_configure_filters(wl, 0);
1708 1750
@@ -1747,19 +1789,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1747 ret = wl1271_cmd_build_probe_req(wl, NULL, 0, 1789 ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
1748 NULL, 0, wl->band); 1790 NULL, 0, wl->band);
1749 1791
1750 /* Enable the keep-alive feature */
1751 ret = wl1271_acx_keep_alive_mode(wl, true);
1752 if (ret < 0)
1753 goto out_sleep;
1754
1755 /*
1756 * This is awkward. The keep-alive configs must be done
1757 * *after* the join command, because otherwise it will
1758 * not work, but it must only be done *once* because
1759 * otherwise the firmware will start complaining.
1760 */
1761 do_keepalive = true;
1762
1763 /* enable the connection monitoring feature */ 1792 /* enable the connection monitoring feature */
1764 ret = wl1271_acx_conn_monit_params(wl, true); 1793 ret = wl1271_acx_conn_monit_params(wl, true);
1765 if (ret < 0) 1794 if (ret < 0)
@@ -1827,35 +1856,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1827 } 1856 }
1828 1857
1829 if (do_join) { 1858 if (do_join) {
1830 ret = wl1271_cmd_join(wl, wl->set_bss_type); 1859 ret = wl1271_join(wl);
1831 if (ret < 0) { 1860 if (ret < 0) {
1832 wl1271_warning("cmd join failed %d", ret); 1861 wl1271_warning("cmd join failed %d", ret);
1833 goto out_sleep; 1862 goto out_sleep;
1834 } 1863 }
1835 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1836 }
1837
1838 /*
1839 * The JOIN operation shuts down the firmware keep-alive as a side
1840 * effect, and the ACX_AID will start the keep-alive as a side effect.
1841 * Hence, for non-IBSS, the ACX_AID must always happen *after* the
1842 * JOIN operation, and the template config after the ACX_AID.
1843 */
1844 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1845 ret = wl1271_acx_aid(wl, wl->aid);
1846 if (ret < 0)
1847 goto out_sleep;
1848 }
1849
1850 if (do_keepalive) {
1851 ret = wl1271_cmd_build_klv_null_data(wl);
1852 if (ret < 0)
1853 goto out_sleep;
1854 ret = wl1271_acx_keep_alive_config(
1855 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1856 ACX_KEEP_ALIVE_TPL_VALID);
1857 if (ret < 0)
1858 goto out_sleep;
1859 } 1864 }
1860 1865
1861out_sleep: 1866out_sleep:
@@ -2266,7 +2271,6 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2266 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval; 2271 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
2267 2272
2268 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 2273 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
2269 IEEE80211_HW_NOISE_DBM |
2270 IEEE80211_HW_BEACON_FILTER | 2274 IEEE80211_HW_BEACON_FILTER |
2271 IEEE80211_HW_SUPPORTS_PS | 2275 IEEE80211_HW_SUPPORTS_PS |
2272 IEEE80211_HW_SUPPORTS_UAPSD | 2276 IEEE80211_HW_SUPPORTS_UAPSD |
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index e9381fe3baf4..93828d565390 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -1171,7 +1171,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1171 } 1171 }
1172 1172
1173 /* Get the virtual base address for the device */ 1173 /* Get the virtual base address for the device */
1174 lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1); 1174 lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
1175 if (NULL == lp->base_addr) { 1175 if (NULL == lp->base_addr) {
1176 dev_err(dev, "EmacLite: Could not allocate iomem\n"); 1176 dev_err(dev, "EmacLite: Could not allocate iomem\n");
1177 rc = -EIO; 1177 rc = -EIO;
@@ -1224,7 +1224,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1224 return 0; 1224 return 0;
1225 1225
1226error1: 1226error1:
1227 release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1); 1227 release_mem_region(ndev->mem_start, resource_size(&r_mem));
1228 1228
1229error2: 1229error2:
1230 xemaclite_remove_ndev(ndev); 1230 xemaclite_remove_ndev(ndev);
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index f230f6543bff..854959cada3a 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
1484 if (!s) 1484 if (!s)
1485 return -EINVAL; 1485 return -EINVAL;
1486 1486
1487 if (s->functions) {
1488 WARN_ON(1);
1489 return -EINVAL;
1490 }
1491
1487 /* We do not want to validate the CIS cache... */ 1492 /* We do not want to validate the CIS cache... */
1488 mutex_lock(&s->ops_mutex); 1493 mutex_lock(&s->ops_mutex);
1489 destroy_cis_cache(s); 1494 destroy_cis_cache(s);
@@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
1639 count = 0; 1644 count = 0;
1640 else { 1645 else {
1641 struct pcmcia_socket *s; 1646 struct pcmcia_socket *s;
1642 unsigned int chains; 1647 unsigned int chains = 1;
1643 1648
1644 if (off + count > size) 1649 if (off + count > size)
1645 count = size - off; 1650 count = size - off;
@@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
1648 1653
1649 if (!(s->state & SOCKET_PRESENT)) 1654 if (!(s->state & SOCKET_PRESENT))
1650 return -ENODEV; 1655 return -ENODEV;
1651 if (pccard_validate_cis(s, &chains)) 1656 if (!s->functions && pccard_validate_cis(s, &chains))
1652 return -EIO; 1657 return -EIO;
1653 if (!chains) 1658 if (!chains)
1654 return -ENODATA; 1659 return -ENODATA;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 6206408e196c..2d48196a48cd 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
166 166
167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, 167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
168 IRQF_DISABLED, "pcmcia_insert", sock); 168 IRQF_DISABLED, "pcmcia_insert", sock);
169 if (ret) 169 if (ret) {
170 local_irq_restore(flags);
170 goto out1; 171 goto out1;
172 }
171 173
172 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, 174 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
173 IRQF_DISABLED, "pcmcia_eject", sock); 175 IRQF_DISABLED, "pcmcia_eject", sock);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index cb6036d89e59..4014cf8e4a26 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -687,12 +687,10 @@ static void pcmcia_requery(struct pcmcia_socket *s)
687 new_funcs = mfc.nfn; 687 new_funcs = mfc.nfn;
688 else 688 else
689 new_funcs = 1; 689 new_funcs = 1;
690 if (old_funcs > new_funcs) { 690 if (old_funcs != new_funcs) {
691 /* we need to re-start */
691 pcmcia_card_remove(s, NULL); 692 pcmcia_card_remove(s, NULL);
692 pcmcia_card_add(s); 693 pcmcia_card_add(s);
693 } else if (new_funcs > old_funcs) {
694 s->functions = new_funcs;
695 pcmcia_device_add(s, 1);
696 } 694 }
697 } 695 }
698 696
@@ -728,6 +726,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
728 struct pcmcia_socket *s = dev->socket; 726 struct pcmcia_socket *s = dev->socket;
729 const struct firmware *fw; 727 const struct firmware *fw;
730 int ret = -ENOMEM; 728 int ret = -ENOMEM;
729 cistpl_longlink_mfc_t mfc;
730 int old_funcs, new_funcs = 1;
731 731
732 if (!filename) 732 if (!filename)
733 return -EINVAL; 733 return -EINVAL;
@@ -750,6 +750,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
750 goto release; 750 goto release;
751 } 751 }
752 752
753 /* we need to re-start if the number of functions changed */
754 old_funcs = s->functions;
755 if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC,
756 &mfc))
757 new_funcs = mfc.nfn;
758
759 if (old_funcs != new_funcs)
760 ret = -EBUSY;
753 761
754 /* update information */ 762 /* update information */
755 pcmcia_device_query(dev); 763 pcmcia_device_query(dev);
@@ -858,10 +866,8 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
858 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 866 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
859 dev_dbg(&dev->dev, "device needs a fake CIS\n"); 867 dev_dbg(&dev->dev, "device needs a fake CIS\n");
860 if (!dev->socket->fake_cis) 868 if (!dev->socket->fake_cis)
861 pcmcia_load_firmware(dev, did->cisfile); 869 if (pcmcia_load_firmware(dev, did->cisfile))
862 870 return 0;
863 if (!dev->socket->fake_cis)
864 return 0;
865 } 871 }
866 872
867 if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { 873 if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) {
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index caec1dee2a4b..7c3d03bb4f30 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
755 else 755 else
756 printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); 756 printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
757 757
758#ifdef CONFIG_PCMCIA_PROBE 758 /* If the interrupt is already assigned, it must be the same */
759 759 if (s->irq.AssignedIRQ != 0)
760 if (s->irq.AssignedIRQ != 0) {
761 /* If the interrupt is already assigned, it must be the same */
762 irq = s->irq.AssignedIRQ; 760 irq = s->irq.AssignedIRQ;
763 } else { 761
762#ifdef CONFIG_PCMCIA_PROBE
763 if (!irq) {
764 int try; 764 int try;
765 u32 mask = s->irq_mask; 765 u32 mask = s->irq_mask;
766 void *data = p_dev; /* something unique to this device */ 766 void *data = p_dev; /* something unique to this device */
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 559069a80a3b..a6eb7b59ba9f 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
214 return; 214 return;
215 } 215 }
216 for (i = base, most = 0; i < base+num; i += 8) { 216 for (i = base, most = 0; i < base+num; i += 8) {
217 res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); 217 res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
218 if (!res) 218 if (!res)
219 continue; 219 continue;
220 hole = inb(i); 220 hole = inb(i);
@@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
231 231
232 bad = any = 0; 232 bad = any = 0;
233 for (i = base; i < base+num; i += 8) { 233 for (i = base; i < base+num; i += 8) {
234 res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); 234 res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
235 if (!res) 235 if (!res) {
236 if (!any)
237 printk(" excluding");
238 if (!bad)
239 bad = any = i;
236 continue; 240 continue;
241 }
237 for (j = 0; j < 8; j++) 242 for (j = 0; j < 8; j++)
238 if (inb(i+j) != most) 243 if (inb(i+j) != most)
239 break; 244 break;
@@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
253 } 258 }
254 if (bad) { 259 if (bad) {
255 if ((num > 16) && (bad == base) && (i == base+num)) { 260 if ((num > 16) && (bad == base) && (i == base+num)) {
261 sub_interval(&s_data->io_db, bad, i-bad);
256 printk(" nothing: probe failed.\n"); 262 printk(" nothing: probe failed.\n");
257 return; 263 return;
258 } else { 264 } else {
@@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
804static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) 810static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end)
805{ 811{
806 struct socket_data *data = s->resource_data; 812 struct socket_data *data = s->resource_data;
807 unsigned long size = end - start + 1; 813 unsigned long size;
808 int ret = 0; 814 int ret = 0;
809 815
810#if defined(CONFIG_X86) 816#if defined(CONFIG_X86)
@@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
814 start = 0x100; 820 start = 0x100;
815#endif 821#endif
816 822
823 size = end - start + 1;
824
817 if (end < start) 825 if (end < start)
818 return -EINVAL; 826 return -EINVAL;
819 827
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0ee725ced511..9eae04afa9a0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -599,9 +599,9 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
600 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 600 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
601 601
602 if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { 602 if (sk_sleep(sock->sk) && waitqueue_active(sk_sleep(sock->sk))) {
603 sock->sk->sk_err = EIO; 603 sock->sk->sk_err = EIO;
604 wake_up_interruptible(sock->sk->sk_sleep); 604 wake_up_interruptible(sk_sleep(sock->sk));
605 } 605 }
606 606
607 iscsi_conn_stop(cls_conn, flag); 607 iscsi_conn_stop(cls_conn, flag);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 175d202ab37e..8cfa5b12ea7a 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -105,6 +105,10 @@ struct serial_cfg_mem {
105 * manfid 0x0160, 0x0104 105 * manfid 0x0160, 0x0104
106 * This card appears to have a 14.7456MHz clock. 106 * This card appears to have a 14.7456MHz clock.
107 */ 107 */
108/* Generic Modem: MD55x (GPRS/EDGE) have
109 * Elan VPU16551 UART with 14.7456MHz oscillator
110 * manfid 0x015D, 0x4C45
111 */
108static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) 112static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port)
109{ 113{
110 port->uartclk = 14745600; 114 port->uartclk = 14745600;
@@ -196,6 +200,11 @@ static const struct serial_quirk quirks[] = {
196 .multi = -1, 200 .multi = -1,
197 .setup = quirk_setup_brainboxes_0104, 201 .setup = quirk_setup_brainboxes_0104,
198 }, { 202 }, {
203 .manfid = 0x015D,
204 .prodid = 0x4C45,
205 .multi = -1,
206 .setup = quirk_setup_brainboxes_0104,
207 }, {
199 .manfid = MANFID_IBM, 208 .manfid = MANFID_IBM,
200 .prodid = ~0, 209 .prodid = ~0,
201 .multi = -1, 210 .multi = -1,
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 59c3c0fdbecd..59ae76bace14 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -233,6 +233,8 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
233{ 233{
234 if (!cc->dev) 234 if (!cc->dev)
235 return; /* We don't have a ChipCommon */ 235 return; /* We don't have a ChipCommon */
236 if (cc->dev->id.revision >= 11)
237 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
236 ssb_pmu_init(cc); 238 ssb_pmu_init(cc);
237 chipco_powercontrol_init(cc); 239 chipco_powercontrol_init(cc);
238 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); 240 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index f1dcd7969a5c..0e8d35224614 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -246,20 +246,12 @@ static struct pci_controller ssb_pcicore_controller = {
246 .pci_ops = &ssb_pcicore_pciops, 246 .pci_ops = &ssb_pcicore_pciops,
247 .io_resource = &ssb_pcicore_io_resource, 247 .io_resource = &ssb_pcicore_io_resource,
248 .mem_resource = &ssb_pcicore_mem_resource, 248 .mem_resource = &ssb_pcicore_mem_resource,
249 .mem_offset = 0x24000000,
250}; 249};
251 250
252static u32 ssb_pcicore_pcibus_iobase = 0x100;
253static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA;
254
255/* This function is called when doing a pci_enable_device(). 251/* This function is called when doing a pci_enable_device().
256 * We must first check if the device is a device on the PCI-core bridge. */ 252 * We must first check if the device is a device on the PCI-core bridge. */
257int ssb_pcicore_plat_dev_init(struct pci_dev *d) 253int ssb_pcicore_plat_dev_init(struct pci_dev *d)
258{ 254{
259 struct resource *res;
260 int pos, size;
261 u32 *base;
262
263 if (d->bus->ops != &ssb_pcicore_pciops) { 255 if (d->bus->ops != &ssb_pcicore_pciops) {
264 /* This is not a device on the PCI-core bridge. */ 256 /* This is not a device on the PCI-core bridge. */
265 return -ENODEV; 257 return -ENODEV;
@@ -268,27 +260,6 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
268 ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", 260 ssb_printk(KERN_INFO "PCI: Fixing up device %s\n",
269 pci_name(d)); 261 pci_name(d));
270 262
271 /* Fix up resource bases */
272 for (pos = 0; pos < 6; pos++) {
273 res = &d->resource[pos];
274 if (res->flags & IORESOURCE_IO)
275 base = &ssb_pcicore_pcibus_iobase;
276 else
277 base = &ssb_pcicore_pcibus_membase;
278 res->flags |= IORESOURCE_PCI_FIXED;
279 if (res->end) {
280 size = res->end - res->start + 1;
281 if (*base & (size - 1))
282 *base = (*base + size) & ~(size - 1);
283 res->start = *base;
284 res->end = res->start + size - 1;
285 *base += size;
286 pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start);
287 }
288 /* Fix up PCI bridge BAR0 only */
289 if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0)
290 break;
291 }
292 /* Fix up interrupt lines */ 263 /* Fix up interrupt lines */
293 d->irq = ssb_mips_irq(extpci_core->dev) + 2; 264 d->irq = ssb_mips_irq(extpci_core->dev) + 2;
294 pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); 265 pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 80ff7d9e60de..7e5b8a8a64cc 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -834,6 +834,9 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus,
834 if (!err) { 834 if (!err) {
835 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 835 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
836 "PCI device %s\n", dev_name(&host_pci->dev)); 836 "PCI device %s\n", dev_name(&host_pci->dev));
837 } else {
838 ssb_printk(KERN_ERR PFX "Failed to register PCI version"
839 " of SSB with error %d\n", err);
837 } 840 }
838 841
839 return err; 842 return err;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dbb06623c9..989e2752cc36 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -168,7 +168,7 @@ err_pci:
168} 168}
169 169
170/* Get the word-offset for a SSB_SPROM_XXX define. */ 170/* Get the word-offset for a SSB_SPROM_XXX define. */
171#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16)) 171#define SPOFF(offset) ((offset) / sizeof(u16))
172/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */ 172/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
173#define SPEX16(_outvar, _offset, _mask, _shift) \ 173#define SPEX16(_outvar, _offset, _mask, _shift) \
174 out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift)) 174 out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
254 int i; 254 int i;
255 255
256 for (i = 0; i < bus->sprom_size; i++) 256 for (i = 0; i < bus->sprom_size; i++)
257 sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2)); 257 sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
258 258
259 return 0; 259 return 0;
260} 260}
@@ -285,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
285 ssb_printk("75%%"); 285 ssb_printk("75%%");
286 else if (i % 2) 286 else if (i % 2)
287 ssb_printk("."); 287 ssb_printk(".");
288 writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2)); 288 writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
289 mmiowb(); 289 mmiowb();
290 msleep(20); 290 msleep(20);
291 } 291 }
@@ -621,6 +621,14 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
621 int err = -ENOMEM; 621 int err = -ENOMEM;
622 u16 *buf; 622 u16 *buf;
623 623
624 if (!ssb_is_sprom_available(bus)) {
625 ssb_printk(KERN_ERR PFX "No SPROM available!\n");
626 return -ENODEV;
627 }
628
629 bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
630 SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
631
624 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); 632 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
625 if (!buf) 633 if (!buf)
626 goto out; 634 goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index f2f920fef10d..007bc3a03486 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -176,3 +176,17 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
176{ 176{
177 return fallback_sprom; 177 return fallback_sprom;
178} 178}
179
180/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
181bool ssb_is_sprom_available(struct ssb_bus *bus)
182{
183 /* status register only exists on chipcomon rev >= 11 and we need check
184 for >= 31 only */
185 /* this routine differs from specs as we do not access SPROM directly
186 on PCMCIA */
187 if (bus->bustype == SSB_BUSTYPE_PCI &&
188 bus->chipco.dev->id.revision >= 31)
189 return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
190
191 return true;
192}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0e8468ffd100..0bf5020d0d32 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -194,10 +194,10 @@ config EP93XX_WATCHDOG
194 194
195config OMAP_WATCHDOG 195config OMAP_WATCHDOG
196 tristate "OMAP Watchdog" 196 tristate "OMAP Watchdog"
197 depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3 197 depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
198 help 198 help
199 Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y' 199 Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y'
200 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer. 200 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
201 201
202config PNX4008_WATCHDOG 202config PNX4008_WATCHDOG
203 tristate "PNX4008 Watchdog" 203 tristate "PNX4008 Watchdog"
@@ -302,7 +302,7 @@ config TS72XX_WATCHDOG
302 302
303config MAX63XX_WATCHDOG 303config MAX63XX_WATCHDOG
304 tristate "Max63xx watchdog" 304 tristate "Max63xx watchdog"
305 depends on ARM 305 depends on ARM && HAS_IOMEM
306 help 306 help
307 Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. 307 Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
308 308
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 8b724aad6825..500d38342e1e 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
44 44
45#ifdef CONFIG_FSL_BOOKE 45#ifdef CONFIG_FSL_BOOKE
46#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) 46#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
47#define WDTP_MASK (WDTP(0)) 47#define WDTP_MASK (WDTP(0x3f))
48#else 48#else
49#define WDTP(x) (TCR_WP(x)) 49#define WDTP(x) (TCR_WP(x))
50#define WDTP_MASK (TCR_WP_MASK) 50#define WDTP_MASK (TCR_WP_MASK)
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 75f3a83c0361..3053ff05ca41 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry)
154 154
155static void max63xx_wdt_disable(void) 155static void max63xx_wdt_disable(void)
156{ 156{
157 u8 val;
158
157 spin_lock(&io_lock); 159 spin_lock(&io_lock);
158 160
159 __raw_writeb(3, wdt_base); 161 val = __raw_readb(wdt_base);
162 val &= ~MAX6369_WDSET;
163 val |= 3;
164 __raw_writeb(val, wdt_base);
160 165
161 spin_unlock(&io_lock); 166 spin_unlock(&io_lock);
162 167
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9e23ffea7f54..b34d32fdaaec 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3235,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3235 u64 bytes) 3235 u64 bytes)
3236{ 3236{
3237 struct btrfs_space_info *data_sinfo; 3237 struct btrfs_space_info *data_sinfo;
3238 int ret = 0, committed = 0; 3238 u64 used;
3239 int ret = 0, committed = 0, flushed = 0;
3239 3240
3240 /* make sure bytes are sectorsize aligned */ 3241 /* make sure bytes are sectorsize aligned */
3241 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 3242 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -3247,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3247again: 3248again:
3248 /* make sure we have enough space to handle the data first */ 3249 /* make sure we have enough space to handle the data first */
3249 spin_lock(&data_sinfo->lock); 3250 spin_lock(&data_sinfo->lock);
3250 if (data_sinfo->total_bytes - data_sinfo->bytes_used - 3251 used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
3251 data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved - 3252 data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
3252 data_sinfo->bytes_pinned - data_sinfo->bytes_readonly - 3253 data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
3253 data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) { 3254 data_sinfo->bytes_super;
3255
3256 if (used + bytes > data_sinfo->total_bytes) {
3254 struct btrfs_trans_handle *trans; 3257 struct btrfs_trans_handle *trans;
3255 3258
3259 if (!flushed) {
3260 spin_unlock(&data_sinfo->lock);
3261 flush_delalloc(root, data_sinfo);
3262 flushed = 1;
3263 goto again;
3264 }
3265
3256 /* 3266 /*
3257 * if we don't have enough free bytes in this space then we need 3267 * if we don't have enough free bytes in this space then we need
3258 * to alloc a new chunk. 3268 * to alloc a new chunk.
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index aa7dc36dac78..8db7b14bbae8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2250,6 +2250,12 @@ again:
2250 if (!looped) 2250 if (!looped)
2251 calc_size = max_t(u64, min_stripe_size, calc_size); 2251 calc_size = max_t(u64, min_stripe_size, calc_size);
2252 2252
2253 /*
2254 * we're about to do_div by the stripe_len so lets make sure
2255 * we end up with something bigger than a stripe
2256 */
2257 calc_size = max_t(u64, calc_size, stripe_len * 4);
2258
2253 do_div(calc_size, stripe_len); 2259 do_div(calc_size, stripe_len);
2254 calc_size *= stripe_len; 2260 calc_size *= stripe_len;
2255 2261
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index aa3cd7cc3e40..412593703d1e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -337,16 +337,15 @@ out:
337/* 337/*
338 * Get ref for the oldest snapc for an inode with dirty data... that is, the 338 * Get ref for the oldest snapc for an inode with dirty data... that is, the
339 * only snap context we are allowed to write back. 339 * only snap context we are allowed to write back.
340 *
341 * Caller holds i_lock.
342 */ 340 */
343static struct ceph_snap_context *__get_oldest_context(struct inode *inode, 341static struct ceph_snap_context *get_oldest_context(struct inode *inode,
344 u64 *snap_size) 342 u64 *snap_size)
345{ 343{
346 struct ceph_inode_info *ci = ceph_inode(inode); 344 struct ceph_inode_info *ci = ceph_inode(inode);
347 struct ceph_snap_context *snapc = NULL; 345 struct ceph_snap_context *snapc = NULL;
348 struct ceph_cap_snap *capsnap = NULL; 346 struct ceph_cap_snap *capsnap = NULL;
349 347
348 spin_lock(&inode->i_lock);
350 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 349 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
351 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 350 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
352 capsnap->context, capsnap->dirty_pages); 351 capsnap->context, capsnap->dirty_pages);
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
357 break; 356 break;
358 } 357 }
359 } 358 }
360 if (!snapc && ci->i_snap_realm) { 359 if (!snapc && ci->i_head_snapc) {
361 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); 360 snapc = ceph_get_snap_context(ci->i_head_snapc);
362 dout(" head snapc %p has %d dirty pages\n", 361 dout(" head snapc %p has %d dirty pages\n",
363 snapc, ci->i_wrbuffer_ref_head); 362 snapc, ci->i_wrbuffer_ref_head);
364 } 363 }
365 return snapc;
366}
367
368static struct ceph_snap_context *get_oldest_context(struct inode *inode,
369 u64 *snap_size)
370{
371 struct ceph_snap_context *snapc = NULL;
372
373 spin_lock(&inode->i_lock);
374 snapc = __get_oldest_context(inode, snap_size);
375 spin_unlock(&inode->i_lock); 364 spin_unlock(&inode->i_lock);
376 return snapc; 365 return snapc;
377} 366}
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
392 int len = PAGE_CACHE_SIZE; 381 int len = PAGE_CACHE_SIZE;
393 loff_t i_size; 382 loff_t i_size;
394 int err = 0; 383 int err = 0;
395 struct ceph_snap_context *snapc; 384 struct ceph_snap_context *snapc, *oldest;
396 u64 snap_size = 0; 385 u64 snap_size = 0;
397 long writeback_stat; 386 long writeback_stat;
398 387
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
413 dout("writepage %p page %p not dirty?\n", inode, page); 402 dout("writepage %p page %p not dirty?\n", inode, page);
414 goto out; 403 goto out;
415 } 404 }
416 if (snapc != get_oldest_context(inode, &snap_size)) { 405 oldest = get_oldest_context(inode, &snap_size);
406 if (snapc->seq > oldest->seq) {
417 dout("writepage %p page %p snapc %p not writeable - noop\n", 407 dout("writepage %p page %p snapc %p not writeable - noop\n",
418 inode, page, (void *)page->private); 408 inode, page, (void *)page->private);
419 /* we should only noop if called by kswapd */ 409 /* we should only noop if called by kswapd */
420 WARN_ON((current->flags & PF_MEMALLOC) == 0); 410 WARN_ON((current->flags & PF_MEMALLOC) == 0);
411 ceph_put_snap_context(oldest);
421 goto out; 412 goto out;
422 } 413 }
414 ceph_put_snap_context(oldest);
423 415
424 /* is this a partial page at end of file? */ 416 /* is this a partial page at end of file? */
425 if (snap_size) 417 if (snap_size)
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
458 ClearPagePrivate(page); 450 ClearPagePrivate(page);
459 end_page_writeback(page); 451 end_page_writeback(page);
460 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 452 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
461 ceph_put_snap_context(snapc); 453 ceph_put_snap_context(snapc); /* page's reference */
462out: 454out:
463 return err; 455 return err;
464} 456}
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req,
558 dout("inode %p skipping page %p\n", inode, page); 550 dout("inode %p skipping page %p\n", inode, page);
559 wbc->pages_skipped++; 551 wbc->pages_skipped++;
560 } 552 }
553 ceph_put_snap_context((void *)page->private);
561 page->private = 0; 554 page->private = 0;
562 ClearPagePrivate(page); 555 ClearPagePrivate(page);
563 ceph_put_snap_context(snapc);
564 dout("unlocking %d %p\n", i, page); 556 dout("unlocking %d %p\n", i, page);
565 end_page_writeback(page); 557 end_page_writeback(page);
566 558
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping,
618 int range_whole = 0; 610 int range_whole = 0;
619 int should_loop = 1; 611 int should_loop = 1;
620 pgoff_t max_pages = 0, max_pages_ever = 0; 612 pgoff_t max_pages = 0, max_pages_ever = 0;
621 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; 613 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
622 struct pagevec pvec; 614 struct pagevec pvec;
623 int done = 0; 615 int done = 0;
624 int rc = 0; 616 int rc = 0;
@@ -770,9 +762,10 @@ get_more_pages:
770 } 762 }
771 763
772 /* only if matching snap context */ 764 /* only if matching snap context */
773 if (snapc != (void *)page->private) { 765 pgsnapc = (void *)page->private;
774 dout("page snapc %p != oldest %p\n", 766 if (pgsnapc->seq > snapc->seq) {
775 (void *)page->private, snapc); 767 dout("page snapc %p %lld > oldest %p %lld\n",
768 pgsnapc, pgsnapc->seq, snapc, snapc->seq);
776 unlock_page(page); 769 unlock_page(page);
777 if (!locked_pages) 770 if (!locked_pages)
778 continue; /* keep looking for snap */ 771 continue; /* keep looking for snap */
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode,
914 struct ceph_snap_context *snapc) 907 struct ceph_snap_context *snapc)
915{ 908{
916 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); 909 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
917 return !oldest || snapc->seq <= oldest->seq; 910 int ret = !oldest || snapc->seq <= oldest->seq;
911
912 ceph_put_snap_context(oldest);
913 return ret;
918} 914}
919 915
920/* 916/*
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file,
936 int pos_in_page = pos & ~PAGE_CACHE_MASK; 932 int pos_in_page = pos & ~PAGE_CACHE_MASK;
937 int end_in_page = pos_in_page + len; 933 int end_in_page = pos_in_page + len;
938 loff_t i_size; 934 loff_t i_size;
939 struct ceph_snap_context *snapc;
940 int r; 935 int r;
936 struct ceph_snap_context *snapc, *oldest;
941 937
942retry_locked: 938retry_locked:
943 /* writepages currently holds page lock, but if we change that later, */ 939 /* writepages currently holds page lock, but if we change that later, */
@@ -947,23 +943,24 @@ retry_locked:
947 BUG_ON(!ci->i_snap_realm); 943 BUG_ON(!ci->i_snap_realm);
948 down_read(&mdsc->snap_rwsem); 944 down_read(&mdsc->snap_rwsem);
949 BUG_ON(!ci->i_snap_realm->cached_context); 945 BUG_ON(!ci->i_snap_realm->cached_context);
950 if (page->private && 946 snapc = (void *)page->private;
951 (void *)page->private != ci->i_snap_realm->cached_context) { 947 if (snapc && snapc != ci->i_head_snapc) {
952 /* 948 /*
953 * this page is already dirty in another (older) snap 949 * this page is already dirty in another (older) snap
954 * context! is it writeable now? 950 * context! is it writeable now?
955 */ 951 */
956 snapc = get_oldest_context(inode, NULL); 952 oldest = get_oldest_context(inode, NULL);
957 up_read(&mdsc->snap_rwsem); 953 up_read(&mdsc->snap_rwsem);
958 954
959 if (snapc != (void *)page->private) { 955 if (snapc->seq > oldest->seq) {
956 ceph_put_snap_context(oldest);
960 dout(" page %p snapc %p not current or oldest\n", 957 dout(" page %p snapc %p not current or oldest\n",
961 page, (void *)page->private); 958 page, snapc);
962 /* 959 /*
963 * queue for writeback, and wait for snapc to 960 * queue for writeback, and wait for snapc to
964 * be writeable or written 961 * be writeable or written
965 */ 962 */
966 snapc = ceph_get_snap_context((void *)page->private); 963 snapc = ceph_get_snap_context(snapc);
967 unlock_page(page); 964 unlock_page(page);
968 ceph_queue_writeback(inode); 965 ceph_queue_writeback(inode);
969 r = wait_event_interruptible(ci->i_cap_wq, 966 r = wait_event_interruptible(ci->i_cap_wq,
@@ -973,6 +970,7 @@ retry_locked:
973 return r; 970 return r;
974 return -EAGAIN; 971 return -EAGAIN;
975 } 972 }
973 ceph_put_snap_context(oldest);
976 974
977 /* yay, writeable, do it now (without dropping page lock) */ 975 /* yay, writeable, do it now (without dropping page lock) */
978 dout(" page %p snapc %p not current, but oldest\n", 976 dout(" page %p snapc %p not current, but oldest\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 3710e077a857..aa2239fa9a3b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1205,6 +1205,12 @@ retry:
1205 if (capsnap->dirty_pages || capsnap->writing) 1205 if (capsnap->dirty_pages || capsnap->writing)
1206 continue; 1206 continue;
1207 1207
1208 /*
1209 * if cap writeback already occurred, we should have dropped
1210 * the capsnap in ceph_put_wrbuffer_cap_refs.
1211 */
1212 BUG_ON(capsnap->dirty == 0);
1213
1208 /* pick mds, take s_mutex */ 1214 /* pick mds, take s_mutex */
1209 mds = __ceph_get_cap_mds(ci, &mseq); 1215 mds = __ceph_get_cap_mds(ci, &mseq);
1210 if (session && session->s_mds != mds) { 1216 if (session && session->s_mds != mds) {
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2118 } 2124 }
2119 spin_unlock(&inode->i_lock); 2125 spin_unlock(&inode->i_lock);
2120 2126
2121 dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), 2127 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2122 last ? "last" : ""); 2128 last ? " last" : "", put ? " put" : "");
2123 2129
2124 if (last && !flushsnaps) 2130 if (last && !flushsnaps)
2125 ceph_check_caps(ci, 0, NULL); 2131 ceph_check_caps(ci, 0, NULL);
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2143{ 2149{
2144 struct inode *inode = &ci->vfs_inode; 2150 struct inode *inode = &ci->vfs_inode;
2145 int last = 0; 2151 int last = 0;
2146 int last_snap = 0; 2152 int complete_capsnap = 0;
2153 int drop_capsnap = 0;
2147 int found = 0; 2154 int found = 0;
2148 struct ceph_cap_snap *capsnap = NULL; 2155 struct ceph_cap_snap *capsnap = NULL;
2149 2156
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2166 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2173 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2167 if (capsnap->context == snapc) { 2174 if (capsnap->context == snapc) {
2168 found = 1; 2175 found = 1;
2169 capsnap->dirty_pages -= nr;
2170 last_snap = !capsnap->dirty_pages;
2171 break; 2176 break;
2172 } 2177 }
2173 } 2178 }
2174 BUG_ON(!found); 2179 BUG_ON(!found);
2180 capsnap->dirty_pages -= nr;
2181 if (capsnap->dirty_pages == 0) {
2182 complete_capsnap = 1;
2183 if (capsnap->dirty == 0)
2184 /* cap writeback completed before we created
2185 * the cap_snap; no FLUSHSNAP is needed */
2186 drop_capsnap = 1;
2187 }
2175 dout("put_wrbuffer_cap_refs on %p cap_snap %p " 2188 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2176 " snap %lld %d/%d -> %d/%d %s%s\n", 2189 " snap %lld %d/%d -> %d/%d %s%s%s\n",
2177 inode, capsnap, capsnap->context->seq, 2190 inode, capsnap, capsnap->context->seq,
2178 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 2191 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2179 ci->i_wrbuffer_ref, capsnap->dirty_pages, 2192 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2180 last ? " (wrbuffer last)" : "", 2193 last ? " (wrbuffer last)" : "",
2181 last_snap ? " (capsnap last)" : ""); 2194 complete_capsnap ? " (complete capsnap)" : "",
2195 drop_capsnap ? " (drop capsnap)" : "");
2196 if (drop_capsnap) {
2197 ceph_put_snap_context(capsnap->context);
2198 list_del(&capsnap->ci_item);
2199 list_del(&capsnap->flushing_item);
2200 ceph_put_cap_snap(capsnap);
2201 }
2182 } 2202 }
2183 2203
2184 spin_unlock(&inode->i_lock); 2204 spin_unlock(&inode->i_lock);
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2186 if (last) { 2206 if (last) {
2187 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2207 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2188 iput(inode); 2208 iput(inode);
2189 } else if (last_snap) { 2209 } else if (complete_capsnap) {
2190 ceph_flush_snaps(ci); 2210 ceph_flush_snaps(ci);
2191 wake_up(&ci->i_cap_wq); 2211 wake_up(&ci->i_cap_wq);
2192 } 2212 }
2213 if (drop_capsnap)
2214 iput(inode);
2193} 2215}
2194 2216
2195/* 2217/*
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2465 break; 2487 break;
2466 } 2488 }
2467 WARN_ON(capsnap->dirty_pages || capsnap->writing); 2489 WARN_ON(capsnap->dirty_pages || capsnap->writing);
2468 dout(" removing cap_snap %p follows %lld\n", 2490 dout(" removing %p cap_snap %p follows %lld\n",
2469 capsnap, follows); 2491 inode, capsnap, follows);
2470 ceph_put_snap_context(capsnap->context); 2492 ceph_put_snap_context(capsnap->context);
2471 list_del(&capsnap->ci_item); 2493 list_del(&capsnap->ci_item);
2472 list_del(&capsnap->flushing_item); 2494 list_del(&capsnap->flushing_item);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 7261dc6c2ead..ea8ee2e526aa 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -171,11 +171,11 @@ more:
171 spin_lock(&inode->i_lock); 171 spin_lock(&inode->i_lock);
172 spin_lock(&dcache_lock); 172 spin_lock(&dcache_lock);
173 173
174 last = dentry;
175
174 if (err < 0) 176 if (err < 0)
175 goto out_unlock; 177 goto out_unlock;
176 178
177 last = dentry;
178
179 p = p->prev; 179 p = p->prev;
180 filp->f_pos++; 180 filp->f_pos++;
181 181
@@ -312,7 +312,7 @@ more:
312 req->r_readdir_offset = fi->next_offset; 312 req->r_readdir_offset = fi->next_offset;
313 req->r_args.readdir.frag = cpu_to_le32(frag); 313 req->r_args.readdir.frag = cpu_to_le32(frag);
314 req->r_args.readdir.max_entries = cpu_to_le32(max_entries); 314 req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
315 req->r_num_caps = max_entries; 315 req->r_num_caps = max_entries + 1;
316 err = ceph_mdsc_do_request(mdsc, NULL, req); 316 err = ceph_mdsc_do_request(mdsc, NULL, req);
317 if (err < 0) { 317 if (err < 0) {
318 ceph_mdsc_put_request(req); 318 ceph_mdsc_put_request(req);
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
489 struct inode *inode = ceph_get_snapdir(parent); 489 struct inode *inode = ceph_get_snapdir(parent);
490 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", 490 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
491 dentry, dentry->d_name.len, dentry->d_name.name, inode); 491 dentry, dentry->d_name.len, dentry->d_name.name, inode);
492 BUG_ON(!d_unhashed(dentry));
492 d_add(dentry, inode); 493 d_add(dentry, inode);
493 err = 0; 494 err = 0;
494 } 495 }
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index aca82d55cc53..26f883c275e8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
886 struct inode *in = NULL; 886 struct inode *in = NULL;
887 struct ceph_mds_reply_inode *ininfo; 887 struct ceph_mds_reply_inode *ininfo;
888 struct ceph_vino vino; 888 struct ceph_vino vino;
889 struct ceph_client *client = ceph_sb_to_client(sb);
889 int i = 0; 890 int i = 0;
890 int err = 0; 891 int err = 0;
891 892
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
949 return err; 950 return err;
950 } 951 }
951 952
952 if (rinfo->head->is_dentry && !req->r_aborted) { 953 /*
954 * ignore null lease/binding on snapdir ENOENT, or else we
955 * will have trouble splicing in the virtual snapdir later
956 */
957 if (rinfo->head->is_dentry && !req->r_aborted &&
958 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
959 client->mount_args->snapdir_name,
960 req->r_dentry->d_name.len))) {
953 /* 961 /*
954 * lookup link rename : null -> possibly existing inode 962 * lookup link rename : null -> possibly existing inode
955 * mknod symlink mkdir : null -> new inode 963 * mknod symlink mkdir : null -> new inode
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 8f1715ffbe4b..cdaaa131add3 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG;
30static char tag_ack = CEPH_MSGR_TAG_ACK; 30static char tag_ack = CEPH_MSGR_TAG_ACK;
31static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; 31static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
32 32
33#ifdef CONFIG_LOCKDEP
34static struct lock_class_key socket_class;
35#endif
36
33 37
34static void queue_con(struct ceph_connection *con); 38static void queue_con(struct ceph_connection *con);
35static void con_work(struct work_struct *); 39static void con_work(struct work_struct *);
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
228 con->sock = sock; 232 con->sock = sock;
229 sock->sk->sk_allocation = GFP_NOFS; 233 sock->sk->sk_allocation = GFP_NOFS;
230 234
235#ifdef CONFIG_LOCKDEP
236 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
237#endif
238
231 set_sock_callbacks(sock, con); 239 set_sock_callbacks(sock, con);
232 240
233 dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); 241 dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con)
333 con->out_msg = NULL; 341 con->out_msg = NULL;
334 } 342 }
335 con->in_seq = 0; 343 con->in_seq = 0;
344 con->in_seq_acked = 0;
336} 345}
337 346
338/* 347/*
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 21c6623c4b07..2e2c15eed82a 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -314,71 +314,6 @@ bad:
314 return ERR_PTR(err); 314 return ERR_PTR(err);
315} 315}
316 316
317
318/*
319 * osd map
320 */
321void ceph_osdmap_destroy(struct ceph_osdmap *map)
322{
323 dout("osdmap_destroy %p\n", map);
324 if (map->crush)
325 crush_destroy(map->crush);
326 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
327 struct ceph_pg_mapping *pg =
328 rb_entry(rb_first(&map->pg_temp),
329 struct ceph_pg_mapping, node);
330 rb_erase(&pg->node, &map->pg_temp);
331 kfree(pg);
332 }
333 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
334 struct ceph_pg_pool_info *pi =
335 rb_entry(rb_first(&map->pg_pools),
336 struct ceph_pg_pool_info, node);
337 rb_erase(&pi->node, &map->pg_pools);
338 kfree(pi);
339 }
340 kfree(map->osd_state);
341 kfree(map->osd_weight);
342 kfree(map->osd_addr);
343 kfree(map);
344}
345
346/*
347 * adjust max osd value. reallocate arrays.
348 */
349static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
350{
351 u8 *state;
352 struct ceph_entity_addr *addr;
353 u32 *weight;
354
355 state = kcalloc(max, sizeof(*state), GFP_NOFS);
356 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
357 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
358 if (state == NULL || addr == NULL || weight == NULL) {
359 kfree(state);
360 kfree(addr);
361 kfree(weight);
362 return -ENOMEM;
363 }
364
365 /* copy old? */
366 if (map->osd_state) {
367 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
368 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
369 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
370 kfree(map->osd_state);
371 kfree(map->osd_addr);
372 kfree(map->osd_weight);
373 }
374
375 map->osd_state = state;
376 map->osd_weight = weight;
377 map->osd_addr = addr;
378 map->max_osd = max;
379 return 0;
380}
381
382/* 317/*
383 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 318 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
384 * to a set of osds) 319 * to a set of osds)
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
482 return NULL; 417 return NULL;
483} 418}
484 419
420static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
421{
422 rb_erase(&pi->node, root);
423 kfree(pi->name);
424 kfree(pi);
425}
426
485void __decode_pool(void **p, struct ceph_pg_pool_info *pi) 427void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
486{ 428{
487 ceph_decode_copy(p, &pi->v, sizeof(pi->v)); 429 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
490 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; 432 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
491} 433}
492 434
435static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
436{
437 struct ceph_pg_pool_info *pi;
438 u32 num, len, pool;
439
440 ceph_decode_32_safe(p, end, num, bad);
441 dout(" %d pool names\n", num);
442 while (num--) {
443 ceph_decode_32_safe(p, end, pool, bad);
444 ceph_decode_32_safe(p, end, len, bad);
445 dout(" pool %d len %d\n", pool, len);
446 pi = __lookup_pg_pool(&map->pg_pools, pool);
447 if (pi) {
448 kfree(pi->name);
449 pi->name = kmalloc(len + 1, GFP_NOFS);
450 if (pi->name) {
451 memcpy(pi->name, *p, len);
452 pi->name[len] = '\0';
453 dout(" name is %s\n", pi->name);
454 }
455 }
456 *p += len;
457 }
458 return 0;
459
460bad:
461 return -EINVAL;
462}
463
464/*
465 * osd map
466 */
467void ceph_osdmap_destroy(struct ceph_osdmap *map)
468{
469 dout("osdmap_destroy %p\n", map);
470 if (map->crush)
471 crush_destroy(map->crush);
472 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
473 struct ceph_pg_mapping *pg =
474 rb_entry(rb_first(&map->pg_temp),
475 struct ceph_pg_mapping, node);
476 rb_erase(&pg->node, &map->pg_temp);
477 kfree(pg);
478 }
479 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
480 struct ceph_pg_pool_info *pi =
481 rb_entry(rb_first(&map->pg_pools),
482 struct ceph_pg_pool_info, node);
483 __remove_pg_pool(&map->pg_pools, pi);
484 }
485 kfree(map->osd_state);
486 kfree(map->osd_weight);
487 kfree(map->osd_addr);
488 kfree(map);
489}
490
491/*
492 * adjust max osd value. reallocate arrays.
493 */
494static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
495{
496 u8 *state;
497 struct ceph_entity_addr *addr;
498 u32 *weight;
499
500 state = kcalloc(max, sizeof(*state), GFP_NOFS);
501 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
502 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
503 if (state == NULL || addr == NULL || weight == NULL) {
504 kfree(state);
505 kfree(addr);
506 kfree(weight);
507 return -ENOMEM;
508 }
509
510 /* copy old? */
511 if (map->osd_state) {
512 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
513 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
514 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
515 kfree(map->osd_state);
516 kfree(map->osd_addr);
517 kfree(map->osd_weight);
518 }
519
520 map->osd_state = state;
521 map->osd_weight = weight;
522 map->osd_addr = addr;
523 map->max_osd = max;
524 return 0;
525}
526
493/* 527/*
494 * decode a full map. 528 * decode a full map.
495 */ 529 */
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
526 ceph_decode_32_safe(p, end, max, bad); 560 ceph_decode_32_safe(p, end, max, bad);
527 while (max--) { 561 while (max--) {
528 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); 562 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
529 pi = kmalloc(sizeof(*pi), GFP_NOFS); 563 pi = kzalloc(sizeof(*pi), GFP_NOFS);
530 if (!pi) 564 if (!pi)
531 goto bad; 565 goto bad;
532 pi->id = ceph_decode_32(p); 566 pi->id = ceph_decode_32(p);
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
539 __decode_pool(p, pi); 573 __decode_pool(p, pi);
540 __insert_pg_pool(&map->pg_pools, pi); 574 __insert_pg_pool(&map->pg_pools, pi);
541 } 575 }
576
577 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
578 goto bad;
579
542 ceph_decode_32_safe(p, end, map->pool_max, bad); 580 ceph_decode_32_safe(p, end, map->pool_max, bad);
543 581
544 ceph_decode_32_safe(p, end, map->flags, bad); 582 ceph_decode_32_safe(p, end, map->flags, bad);
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
712 } 750 }
713 pi = __lookup_pg_pool(&map->pg_pools, pool); 751 pi = __lookup_pg_pool(&map->pg_pools, pool);
714 if (!pi) { 752 if (!pi) {
715 pi = kmalloc(sizeof(*pi), GFP_NOFS); 753 pi = kzalloc(sizeof(*pi), GFP_NOFS);
716 if (!pi) { 754 if (!pi) {
717 err = -ENOMEM; 755 err = -ENOMEM;
718 goto bad; 756 goto bad;
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
722 } 760 }
723 __decode_pool(p, pi); 761 __decode_pool(p, pi);
724 } 762 }
763 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
764 goto bad;
725 765
726 /* old_pool */ 766 /* old_pool */
727 ceph_decode_32_safe(p, end, len, bad); 767 ceph_decode_32_safe(p, end, len, bad);
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
730 770
731 ceph_decode_32_safe(p, end, pool, bad); 771 ceph_decode_32_safe(p, end, pool, bad);
732 pi = __lookup_pg_pool(&map->pg_pools, pool); 772 pi = __lookup_pg_pool(&map->pg_pools, pool);
733 if (pi) { 773 if (pi)
734 rb_erase(&pi->node, &map->pg_pools); 774 __remove_pg_pool(&map->pg_pools, pi);
735 kfree(pi);
736 }
737 } 775 }
738 776
739 /* new_up */ 777 /* new_up */
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
index 1fb55afb2642..8bc9f1e4f562 100644
--- a/fs/ceph/osdmap.h
+++ b/fs/ceph/osdmap.h
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info {
23 int id; 23 int id;
24 struct ceph_pg_pool v; 24 struct ceph_pg_pool v;
25 int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; 25 int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
26 char *name;
26}; 27};
27 28
28struct ceph_pg_mapping { 29struct ceph_pg_mapping {
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index 26ac8b89a676..a1fc1d017b58 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -11,8 +11,10 @@
11/* 11/*
12 * osdmap encoding versions 12 * osdmap encoding versions
13 */ 13 */
14#define CEPH_OSDMAP_INC_VERSION 4 14#define CEPH_OSDMAP_INC_VERSION 5
15#define CEPH_OSDMAP_VERSION 4 15#define CEPH_OSDMAP_INC_VERSION_EXT 5
16#define CEPH_OSDMAP_VERSION 5
17#define CEPH_OSDMAP_VERSION_EXT 5
16 18
17/* 19/*
18 * fs id 20 * fs id
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e6f9bc57d472..2b881262ef67 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num)
431 * Caller must hold snap_rwsem for read (i.e., the realm topology won't 431 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
432 * change). 432 * change).
433 */ 433 */
434void ceph_queue_cap_snap(struct ceph_inode_info *ci, 434void ceph_queue_cap_snap(struct ceph_inode_info *ci)
435 struct ceph_snap_context *snapc)
436{ 435{
437 struct inode *inode = &ci->vfs_inode; 436 struct inode *inode = &ci->vfs_inode;
438 struct ceph_cap_snap *capsnap; 437 struct ceph_cap_snap *capsnap;
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
451 as no new writes are allowed to start when pending, so any 450 as no new writes are allowed to start when pending, so any
452 writes in progress now were started before the previous 451 writes in progress now were started before the previous
453 cap_snap. lucky us. */ 452 cap_snap. lucky us. */
454 dout("queue_cap_snap %p snapc %p seq %llu used %d" 453 dout("queue_cap_snap %p already pending\n", inode);
455 " already pending\n", inode, snapc, snapc->seq, used);
456 kfree(capsnap); 454 kfree(capsnap);
457 } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { 455 } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
456 struct ceph_snap_context *snapc = ci->i_head_snapc;
457
458 igrab(inode); 458 igrab(inode);
459 459
460 atomic_set(&capsnap->nref, 1); 460 atomic_set(&capsnap->nref, 1);
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
463 INIT_LIST_HEAD(&capsnap->flushing_item); 463 INIT_LIST_HEAD(&capsnap->flushing_item);
464 464
465 capsnap->follows = snapc->seq - 1; 465 capsnap->follows = snapc->seq - 1;
466 capsnap->context = ceph_get_snap_context(snapc);
467 capsnap->issued = __ceph_caps_issued(ci, NULL); 466 capsnap->issued = __ceph_caps_issued(ci, NULL);
468 capsnap->dirty = __ceph_caps_dirty(ci); 467 capsnap->dirty = __ceph_caps_dirty(ci);
469 468
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
480 snapshot. */ 479 snapshot. */
481 capsnap->dirty_pages = ci->i_wrbuffer_ref_head; 480 capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
482 ci->i_wrbuffer_ref_head = 0; 481 ci->i_wrbuffer_ref_head = 0;
483 ceph_put_snap_context(ci->i_head_snapc); 482 capsnap->context = snapc;
484 ci->i_head_snapc = NULL; 483 ci->i_head_snapc = NULL;
485 list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); 484 list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
486 485
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
522 capsnap->ctime = inode->i_ctime; 521 capsnap->ctime = inode->i_ctime;
523 capsnap->time_warp_seq = ci->i_time_warp_seq; 522 capsnap->time_warp_seq = ci->i_time_warp_seq;
524 if (capsnap->dirty_pages) { 523 if (capsnap->dirty_pages) {
525 dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " 524 dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
526 "still has %d dirty pages\n", inode, capsnap, 525 "still has %d dirty pages\n", inode, capsnap,
527 capsnap->context, capsnap->context->seq, 526 capsnap->context, capsnap->context->seq,
528 capsnap->size, capsnap->dirty_pages); 527 ceph_cap_string(capsnap->dirty), capsnap->size,
528 capsnap->dirty_pages);
529 return 0; 529 return 0;
530 } 530 }
531 dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", 531 dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
532 inode, capsnap, capsnap->context, 532 inode, capsnap, capsnap->context,
533 capsnap->context->seq, capsnap->size); 533 capsnap->context->seq, ceph_cap_string(capsnap->dirty),
534 capsnap->size);
534 535
535 spin_lock(&mdsc->snap_flush_lock); 536 spin_lock(&mdsc->snap_flush_lock);
536 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); 537 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
@@ -602,7 +603,7 @@ more:
602 if (lastinode) 603 if (lastinode)
603 iput(lastinode); 604 iput(lastinode);
604 lastinode = inode; 605 lastinode = inode;
605 ceph_queue_cap_snap(ci, realm->cached_context); 606 ceph_queue_cap_snap(ci);
606 spin_lock(&realm->inodes_with_caps_lock); 607 spin_lock(&realm->inodes_with_caps_lock);
607 } 608 }
608 spin_unlock(&realm->inodes_with_caps_lock); 609 spin_unlock(&realm->inodes_with_caps_lock);
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
824 spin_unlock(&realm->inodes_with_caps_lock); 825 spin_unlock(&realm->inodes_with_caps_lock);
825 spin_unlock(&inode->i_lock); 826 spin_unlock(&inode->i_lock);
826 827
827 ceph_queue_cap_snap(ci, 828 ceph_queue_cap_snap(ci);
828 ci->i_snap_realm->cached_context);
829 829
830 iput(inode); 830 iput(inode);
831 continue; 831 continue;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ca702c67bc66..e30dfbb056c3 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
715extern void ceph_handle_snap(struct ceph_mds_client *mdsc, 715extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
716 struct ceph_mds_session *session, 716 struct ceph_mds_session *session,
717 struct ceph_msg *msg); 717 struct ceph_msg *msg);
718extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, 718extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
719 struct ceph_snap_context *snapc);
720extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, 719extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
721 struct ceph_cap_snap *capsnap); 720 struct ceph_cap_snap *capsnap);
722extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); 721extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index efb2b9400391..1cc087635a5e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -382,8 +382,8 @@ out:
382static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, 382static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
383 struct ecryptfs_crypt_stat *crypt_stat) 383 struct ecryptfs_crypt_stat *crypt_stat)
384{ 384{
385 (*offset) = (crypt_stat->num_header_bytes_at_front 385 (*offset) = ecryptfs_lower_header_size(crypt_stat)
386 + (crypt_stat->extent_size * extent_num)); 386 + (crypt_stat->extent_size * extent_num);
387} 387}
388 388
389/** 389/**
@@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
835 set_extent_mask_and_shift(crypt_stat); 835 set_extent_mask_and_shift(crypt_stat);
836 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; 836 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
837 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 837 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
838 crypt_stat->num_header_bytes_at_front = 0; 838 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
839 else { 839 else {
840 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) 840 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
841 crypt_stat->num_header_bytes_at_front = 841 crypt_stat->metadata_size =
842 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 842 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
843 else 843 else
844 crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; 844 crypt_stat->metadata_size = PAGE_CACHE_SIZE;
845 } 845 }
846} 846}
847 847
@@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written)
1108 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; 1108 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1109} 1109}
1110 1110
1111static void 1111void ecryptfs_write_crypt_stat_flags(char *page_virt,
1112write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, 1112 struct ecryptfs_crypt_stat *crypt_stat,
1113 size_t *written) 1113 size_t *written)
1114{ 1114{
1115 u32 flags = 0; 1115 u32 flags = 0;
1116 int i; 1116 int i;
@@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt,
1238 1238
1239 header_extent_size = (u32)crypt_stat->extent_size; 1239 header_extent_size = (u32)crypt_stat->extent_size;
1240 num_header_extents_at_front = 1240 num_header_extents_at_front =
1241 (u16)(crypt_stat->num_header_bytes_at_front 1241 (u16)(crypt_stat->metadata_size / crypt_stat->extent_size);
1242 / crypt_stat->extent_size);
1243 put_unaligned_be32(header_extent_size, virt); 1242 put_unaligned_be32(header_extent_size, virt);
1244 virt += 4; 1243 virt += 4;
1245 put_unaligned_be16(num_header_extents_at_front, virt); 1244 put_unaligned_be16(num_header_extents_at_front, virt);
@@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
1292 offset = ECRYPTFS_FILE_SIZE_BYTES; 1291 offset = ECRYPTFS_FILE_SIZE_BYTES;
1293 write_ecryptfs_marker((page_virt + offset), &written); 1292 write_ecryptfs_marker((page_virt + offset), &written);
1294 offset += written; 1293 offset += written;
1295 write_ecryptfs_flags((page_virt + offset), crypt_stat, &written); 1294 ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
1295 &written);
1296 offset += written; 1296 offset += written;
1297 ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, 1297 ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
1298 &written); 1298 &written);
@@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1382 rc = -EINVAL; 1382 rc = -EINVAL;
1383 goto out; 1383 goto out;
1384 } 1384 }
1385 virt_len = crypt_stat->num_header_bytes_at_front; 1385 virt_len = crypt_stat->metadata_size;
1386 order = get_order(virt_len); 1386 order = get_order(virt_len);
1387 /* Released in this function */ 1387 /* Released in this function */
1388 virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); 1388 virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
@@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1428 header_extent_size = get_unaligned_be32(virt); 1428 header_extent_size = get_unaligned_be32(virt);
1429 virt += sizeof(__be32); 1429 virt += sizeof(__be32);
1430 num_header_extents_at_front = get_unaligned_be16(virt); 1430 num_header_extents_at_front = get_unaligned_be16(virt);
1431 crypt_stat->num_header_bytes_at_front = 1431 crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
1432 (((size_t)num_header_extents_at_front 1432 * (size_t)header_extent_size));
1433 * (size_t)header_extent_size));
1434 (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); 1433 (*bytes_read) = (sizeof(__be32) + sizeof(__be16));
1435 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) 1434 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
1436 && (crypt_stat->num_header_bytes_at_front 1435 && (crypt_stat->metadata_size
1437 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { 1436 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
1438 rc = -EINVAL; 1437 rc = -EINVAL;
1439 printk(KERN_WARNING "Invalid header size: [%zd]\n", 1438 printk(KERN_WARNING "Invalid header size: [%zd]\n",
1440 crypt_stat->num_header_bytes_at_front); 1439 crypt_stat->metadata_size);
1441 } 1440 }
1442 return rc; 1441 return rc;
1443} 1442}
@@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1452 */ 1451 */
1453static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) 1452static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
1454{ 1453{
1455 crypt_stat->num_header_bytes_at_front = 1454 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1456 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1457} 1455}
1458 1456
1459/** 1457/**
@@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
1607 ecryptfs_dentry, 1605 ecryptfs_dentry,
1608 ECRYPTFS_VALIDATE_HEADER_SIZE); 1606 ECRYPTFS_VALIDATE_HEADER_SIZE);
1609 if (rc) { 1607 if (rc) {
1608 memset(page_virt, 0, PAGE_CACHE_SIZE);
1610 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); 1609 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
1611 if (rc) { 1610 if (rc) {
1612 printk(KERN_DEBUG "Valid eCryptfs headers not found in " 1611 printk(KERN_DEBUG "Valid eCryptfs headers not found in "
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 542f625312f3..bc7115403f38 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -273,7 +273,7 @@ struct ecryptfs_crypt_stat {
273 u32 flags; 273 u32 flags;
274 unsigned int file_version; 274 unsigned int file_version;
275 size_t iv_bytes; 275 size_t iv_bytes;
276 size_t num_header_bytes_at_front; 276 size_t metadata_size;
277 size_t extent_size; /* Data extent size; default is 4096 */ 277 size_t extent_size; /* Data extent size; default is 4096 */
278 size_t key_size; 278 size_t key_size;
279 size_t extent_shift; 279 size_t extent_shift;
@@ -464,6 +464,14 @@ struct ecryptfs_daemon {
464 464
465extern struct mutex ecryptfs_daemon_hash_mux; 465extern struct mutex ecryptfs_daemon_hash_mux;
466 466
467static inline size_t
468ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
469{
470 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
471 return 0;
472 return crypt_stat->metadata_size;
473}
474
467static inline struct ecryptfs_file_info * 475static inline struct ecryptfs_file_info *
468ecryptfs_file_to_private(struct file *file) 476ecryptfs_file_to_private(struct file *file)
469{ 477{
@@ -651,6 +659,9 @@ int ecryptfs_decrypt_page(struct page *page);
651int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); 659int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
652int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); 660int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
653int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); 661int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
662void ecryptfs_write_crypt_stat_flags(char *page_virt,
663 struct ecryptfs_crypt_stat *crypt_stat,
664 size_t *written);
654int ecryptfs_read_and_validate_header_region(char *data, 665int ecryptfs_read_and_validate_header_region(char *data,
655 struct inode *ecryptfs_inode); 666 struct inode *ecryptfs_inode);
656int ecryptfs_read_and_validate_xattr_region(char *page_virt, 667int ecryptfs_read_and_validate_xattr_region(char *page_virt,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index d3362faf3852..e2d4418affac 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
324 rc = ecryptfs_read_and_validate_header_region(page_virt, 324 rc = ecryptfs_read_and_validate_header_region(page_virt,
325 ecryptfs_dentry->d_inode); 325 ecryptfs_dentry->d_inode);
326 if (rc) { 326 if (rc) {
327 memset(page_virt, 0, PAGE_CACHE_SIZE);
327 rc = ecryptfs_read_and_validate_xattr_region(page_virt, 328 rc = ecryptfs_read_and_validate_xattr_region(page_virt,
328 ecryptfs_dentry); 329 ecryptfs_dentry);
329 if (rc) { 330 if (rc) {
@@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
336 ecryptfs_dentry->d_sb)->mount_crypt_stat; 337 ecryptfs_dentry->d_sb)->mount_crypt_stat;
337 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { 338 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
338 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 339 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
339 file_size = (crypt_stat->num_header_bytes_at_front 340 file_size = (crypt_stat->metadata_size
340 + i_size_read(lower_dentry->d_inode)); 341 + i_size_read(lower_dentry->d_inode));
341 else 342 else
342 file_size = i_size_read(lower_dentry->d_inode); 343 file_size = i_size_read(lower_dentry->d_inode);
@@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
388 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); 389 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
389 if (IS_ERR(lower_dentry)) { 390 if (IS_ERR(lower_dentry)) {
390 rc = PTR_ERR(lower_dentry); 391 rc = PTR_ERR(lower_dentry);
391 printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " 392 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
392 "lower_dentry = [%s]\n", __func__, rc, 393 "[%d] on lower_dentry = [%s]\n", __func__, rc,
393 ecryptfs_dentry->d_name.name); 394 encrypted_and_encoded_name);
394 goto out_d_drop; 395 goto out_d_drop;
395 } 396 }
396 if (lower_dentry->d_inode) 397 if (lower_dentry->d_inode)
@@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
417 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); 418 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
418 if (IS_ERR(lower_dentry)) { 419 if (IS_ERR(lower_dentry)) {
419 rc = PTR_ERR(lower_dentry); 420 rc = PTR_ERR(lower_dentry);
420 printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " 421 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
421 "lower_dentry = [%s]\n", __func__, rc, 422 "[%d] on lower_dentry = [%s]\n", __func__, rc,
422 encrypted_and_encoded_name); 423 encrypted_and_encoded_name);
423 goto out_d_drop; 424 goto out_d_drop;
424 } 425 }
425lookup_and_interpose: 426lookup_and_interpose:
@@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
456 rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); 457 rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
457 if (rc) 458 if (rc)
458 goto out_lock; 459 goto out_lock;
459 fsstack_copy_attr_times(dir, lower_new_dentry->d_inode); 460 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
460 fsstack_copy_inode_size(dir, lower_new_dentry->d_inode); 461 fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
461 old_dentry->d_inode->i_nlink = 462 old_dentry->d_inode->i_nlink =
462 ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; 463 ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink;
463 i_size_write(new_dentry->d_inode, file_size_save); 464 i_size_write(new_dentry->d_inode, file_size_save);
@@ -648,38 +649,17 @@ out_lock:
648 return rc; 649 return rc;
649} 650}
650 651
651static int 652static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
652ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) 653 size_t *bufsiz)
653{ 654{
655 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
654 char *lower_buf; 656 char *lower_buf;
655 size_t lower_bufsiz; 657 size_t lower_bufsiz = PATH_MAX;
656 struct dentry *lower_dentry;
657 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
658 char *plaintext_name;
659 size_t plaintext_name_size;
660 mm_segment_t old_fs; 658 mm_segment_t old_fs;
661 int rc; 659 int rc;
662 660
663 lower_dentry = ecryptfs_dentry_to_lower(dentry);
664 if (!lower_dentry->d_inode->i_op->readlink) {
665 rc = -EINVAL;
666 goto out;
667 }
668 mount_crypt_stat = &ecryptfs_superblock_to_private(
669 dentry->d_sb)->mount_crypt_stat;
670 /*
671 * If the lower filename is encrypted, it will result in a significantly
672 * longer name. If needed, truncate the name after decode and decrypt.
673 */
674 if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
675 lower_bufsiz = PATH_MAX;
676 else
677 lower_bufsiz = bufsiz;
678 /* Released in this function */
679 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); 661 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
680 if (lower_buf == NULL) { 662 if (!lower_buf) {
681 printk(KERN_ERR "%s: Out of memory whilst attempting to "
682 "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
683 rc = -ENOMEM; 663 rc = -ENOMEM;
684 goto out; 664 goto out;
685 } 665 }
@@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
689 (char __user *)lower_buf, 669 (char __user *)lower_buf,
690 lower_bufsiz); 670 lower_bufsiz);
691 set_fs(old_fs); 671 set_fs(old_fs);
692 if (rc >= 0) { 672 if (rc < 0)
693 rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, 673 goto out;
694 &plaintext_name_size, 674 lower_bufsiz = rc;
695 dentry, lower_buf, 675 rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
696 rc); 676 lower_buf, lower_bufsiz);
697 if (rc) { 677out:
698 printk(KERN_ERR "%s: Error attempting to decode and "
699 "decrypt filename; rc = [%d]\n", __func__,
700 rc);
701 goto out_free_lower_buf;
702 }
703 /* Check for bufsiz <= 0 done in sys_readlinkat() */
704 rc = copy_to_user(buf, plaintext_name,
705 min((size_t) bufsiz, plaintext_name_size));
706 if (rc)
707 rc = -EFAULT;
708 else
709 rc = plaintext_name_size;
710 kfree(plaintext_name);
711 fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
712 }
713out_free_lower_buf:
714 kfree(lower_buf); 678 kfree(lower_buf);
679 return rc;
680}
681
682static int
683ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
684{
685 char *kbuf;
686 size_t kbufsiz, copied;
687 int rc;
688
689 rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
690 if (rc)
691 goto out;
692 copied = min_t(size_t, bufsiz, kbufsiz);
693 rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
694 kfree(kbuf);
695 fsstack_copy_attr_atime(dentry->d_inode,
696 ecryptfs_dentry_to_lower(dentry)->d_inode);
715out: 697out:
716 return rc; 698 return rc;
717} 699}
@@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
769{ 751{
770 loff_t lower_size; 752 loff_t lower_size;
771 753
772 lower_size = crypt_stat->num_header_bytes_at_front; 754 lower_size = ecryptfs_lower_header_size(crypt_stat);
773 if (upper_size != 0) { 755 if (upper_size != 0) {
774 loff_t num_extents; 756 loff_t num_extents;
775 757
@@ -1016,6 +998,28 @@ out:
1016 return rc; 998 return rc;
1017} 999}
1018 1000
1001int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
1002 struct kstat *stat)
1003{
1004 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1005 int rc = 0;
1006
1007 mount_crypt_stat = &ecryptfs_superblock_to_private(
1008 dentry->d_sb)->mount_crypt_stat;
1009 generic_fillattr(dentry->d_inode, stat);
1010 if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
1011 char *target;
1012 size_t targetsiz;
1013
1014 rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
1015 if (!rc) {
1016 kfree(target);
1017 stat->size = targetsiz;
1018 }
1019 }
1020 return rc;
1021}
1022
1019int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1023int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1020 struct kstat *stat) 1024 struct kstat *stat)
1021{ 1025{
@@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
1040 1044
1041 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1045 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1042 if (!lower_dentry->d_inode->i_op->setxattr) { 1046 if (!lower_dentry->d_inode->i_op->setxattr) {
1043 rc = -ENOSYS; 1047 rc = -EOPNOTSUPP;
1044 goto out; 1048 goto out;
1045 } 1049 }
1046 mutex_lock(&lower_dentry->d_inode->i_mutex); 1050 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
1058 int rc = 0; 1062 int rc = 0;
1059 1063
1060 if (!lower_dentry->d_inode->i_op->getxattr) { 1064 if (!lower_dentry->d_inode->i_op->getxattr) {
1061 rc = -ENOSYS; 1065 rc = -EOPNOTSUPP;
1062 goto out; 1066 goto out;
1063 } 1067 }
1064 mutex_lock(&lower_dentry->d_inode->i_mutex); 1068 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
1085 1089
1086 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1090 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1087 if (!lower_dentry->d_inode->i_op->listxattr) { 1091 if (!lower_dentry->d_inode->i_op->listxattr) {
1088 rc = -ENOSYS; 1092 rc = -EOPNOTSUPP;
1089 goto out; 1093 goto out;
1090 } 1094 }
1091 mutex_lock(&lower_dentry->d_inode->i_mutex); 1095 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
1102 1106
1103 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1107 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1104 if (!lower_dentry->d_inode->i_op->removexattr) { 1108 if (!lower_dentry->d_inode->i_op->removexattr) {
1105 rc = -ENOSYS; 1109 rc = -EOPNOTSUPP;
1106 goto out; 1110 goto out;
1107 } 1111 }
1108 mutex_lock(&lower_dentry->d_inode->i_mutex); 1112 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
1133 .put_link = ecryptfs_put_link, 1137 .put_link = ecryptfs_put_link,
1134 .permission = ecryptfs_permission, 1138 .permission = ecryptfs_permission,
1135 .setattr = ecryptfs_setattr, 1139 .setattr = ecryptfs_setattr,
1140 .getattr = ecryptfs_getattr_link,
1136 .setxattr = ecryptfs_setxattr, 1141 .setxattr = ecryptfs_setxattr,
1137 .getxattr = ecryptfs_getxattr, 1142 .getxattr = ecryptfs_getxattr,
1138 .listxattr = ecryptfs_listxattr, 1143 .listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index d491237c98e7..2ee9a3a7b68c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -83,6 +83,19 @@ out:
83 return rc; 83 return rc;
84} 84}
85 85
86static void strip_xattr_flag(char *page_virt,
87 struct ecryptfs_crypt_stat *crypt_stat)
88{
89 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
90 size_t written;
91
92 crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
93 ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
94 &written);
95 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
96 }
97}
98
86/** 99/**
87 * Header Extent: 100 * Header Extent:
88 * Octets 0-7: Unencrypted file size (big-endian) 101 * Octets 0-7: Unencrypted file size (big-endian)
@@ -98,19 +111,6 @@ out:
98 * (big-endian) 111 * (big-endian)
99 * Octet 26: Begin RFC 2440 authentication token packet set 112 * Octet 26: Begin RFC 2440 authentication token packet set
100 */ 113 */
101static void set_header_info(char *page_virt,
102 struct ecryptfs_crypt_stat *crypt_stat)
103{
104 size_t written;
105 size_t save_num_header_bytes_at_front =
106 crypt_stat->num_header_bytes_at_front;
107
108 crypt_stat->num_header_bytes_at_front =
109 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
110 ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
111 crypt_stat->num_header_bytes_at_front =
112 save_num_header_bytes_at_front;
113}
114 114
115/** 115/**
116 * ecryptfs_copy_up_encrypted_with_header 116 * ecryptfs_copy_up_encrypted_with_header
@@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
136 * num_extents_per_page) 136 * num_extents_per_page)
137 + extent_num_in_page); 137 + extent_num_in_page);
138 size_t num_header_extents_at_front = 138 size_t num_header_extents_at_front =
139 (crypt_stat->num_header_bytes_at_front 139 (crypt_stat->metadata_size / crypt_stat->extent_size);
140 / crypt_stat->extent_size);
141 140
142 if (view_extent_num < num_header_extents_at_front) { 141 if (view_extent_num < num_header_extents_at_front) {
143 /* This is a header extent */ 142 /* This is a header extent */
@@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
147 memset(page_virt, 0, PAGE_CACHE_SIZE); 146 memset(page_virt, 0, PAGE_CACHE_SIZE);
148 /* TODO: Support more than one header extent */ 147 /* TODO: Support more than one header extent */
149 if (view_extent_num == 0) { 148 if (view_extent_num == 0) {
149 size_t written;
150
150 rc = ecryptfs_read_xattr_region( 151 rc = ecryptfs_read_xattr_region(
151 page_virt, page->mapping->host); 152 page_virt, page->mapping->host);
152 set_header_info(page_virt, crypt_stat); 153 strip_xattr_flag(page_virt + 16, crypt_stat);
154 ecryptfs_write_header_metadata(page_virt + 20,
155 crypt_stat,
156 &written);
153 } 157 }
154 kunmap_atomic(page_virt, KM_USER0); 158 kunmap_atomic(page_virt, KM_USER0);
155 flush_dcache_page(page); 159 flush_dcache_page(page);
@@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
162 /* This is an encrypted data extent */ 166 /* This is an encrypted data extent */
163 loff_t lower_offset = 167 loff_t lower_offset =
164 ((view_extent_num * crypt_stat->extent_size) 168 ((view_extent_num * crypt_stat->extent_size)
165 - crypt_stat->num_header_bytes_at_front); 169 - crypt_stat->metadata_size);
166 170
167 rc = ecryptfs_read_lower_page_segment( 171 rc = ecryptfs_read_lower_page_segment(
168 page, (lower_offset >> PAGE_CACHE_SHIFT), 172 page, (lower_offset >> PAGE_CACHE_SHIFT),
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index fcef41c1d2cf..278743c7716a 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
86 if (lower_dentry->d_inode) { 86 if (lower_dentry->d_inode) {
87 fput(inode_info->lower_file); 87 fput(inode_info->lower_file);
88 inode_info->lower_file = NULL; 88 inode_info->lower_file = NULL;
89 d_drop(lower_dentry);
90 } 89 }
91 } 90 }
92 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); 91 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 4e2426e22bbe..565cf817bbf1 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -32,6 +32,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
32 .readlink = generic_readlink, 32 .readlink = generic_readlink,
33 .follow_link = page_follow_link_light, 33 .follow_link = page_follow_link_light,
34 .put_link = page_put_link, 34 .put_link = page_put_link,
35 .setattr = ext2_setattr,
35#ifdef CONFIG_EXT2_FS_XATTR 36#ifdef CONFIG_EXT2_FS_XATTR
36 .setxattr = generic_setxattr, 37 .setxattr = generic_setxattr,
37 .getxattr = generic_getxattr, 38 .getxattr = generic_getxattr,
@@ -43,6 +44,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
43const struct inode_operations ext2_fast_symlink_inode_operations = { 44const struct inode_operations ext2_fast_symlink_inode_operations = {
44 .readlink = generic_readlink, 45 .readlink = generic_readlink,
45 .follow_link = ext2_follow_link, 46 .follow_link = ext2_follow_link,
47 .setattr = ext2_setattr,
46#ifdef CONFIG_EXT2_FS_XATTR 48#ifdef CONFIG_EXT2_FS_XATTR
47 .setxattr = generic_setxattr, 49 .setxattr = generic_setxattr,
48 .getxattr = generic_getxattr, 50 .getxattr = generic_getxattr,
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index ff7b4ccd8983..7c4898207776 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -34,6 +34,7 @@ const struct inode_operations ext3_symlink_inode_operations = {
34 .readlink = generic_readlink, 34 .readlink = generic_readlink,
35 .follow_link = page_follow_link_light, 35 .follow_link = page_follow_link_light,
36 .put_link = page_put_link, 36 .put_link = page_put_link,
37 .setattr = ext3_setattr,
37#ifdef CONFIG_EXT3_FS_XATTR 38#ifdef CONFIG_EXT3_FS_XATTR
38 .setxattr = generic_setxattr, 39 .setxattr = generic_setxattr,
39 .getxattr = generic_getxattr, 40 .getxattr = generic_getxattr,
@@ -45,6 +46,7 @@ const struct inode_operations ext3_symlink_inode_operations = {
45const struct inode_operations ext3_fast_symlink_inode_operations = { 46const struct inode_operations ext3_fast_symlink_inode_operations = {
46 .readlink = generic_readlink, 47 .readlink = generic_readlink,
47 .follow_link = ext3_follow_link, 48 .follow_link = ext3_follow_link,
49 .setattr = ext3_setattr,
48#ifdef CONFIG_EXT3_FS_XATTR 50#ifdef CONFIG_EXT3_FS_XATTR
49 .setxattr = generic_setxattr, 51 .setxattr = generic_setxattr,
50 .getxattr = generic_getxattr, 52 .getxattr = generic_getxattr,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 452d02f9075e..0a140741b39e 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -614,9 +614,15 @@ int send_sigurg(struct fown_struct *fown)
614 return ret; 614 return ret;
615} 615}
616 616
617static DEFINE_RWLOCK(fasync_lock); 617static DEFINE_SPINLOCK(fasync_lock);
618static struct kmem_cache *fasync_cache __read_mostly; 618static struct kmem_cache *fasync_cache __read_mostly;
619 619
620static void fasync_free_rcu(struct rcu_head *head)
621{
622 kmem_cache_free(fasync_cache,
623 container_of(head, struct fasync_struct, fa_rcu));
624}
625
620/* 626/*
621 * Remove a fasync entry. If successfully removed, return 627 * Remove a fasync entry. If successfully removed, return
622 * positive and clear the FASYNC flag. If no entry exists, 628 * positive and clear the FASYNC flag. If no entry exists,
@@ -625,8 +631,6 @@ static struct kmem_cache *fasync_cache __read_mostly;
625 * NOTE! It is very important that the FASYNC flag always 631 * NOTE! It is very important that the FASYNC flag always
626 * match the state "is the filp on a fasync list". 632 * match the state "is the filp on a fasync list".
627 * 633 *
628 * We always take the 'filp->f_lock', in since fasync_lock
629 * needs to be irq-safe.
630 */ 634 */
631static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) 635static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
632{ 636{
@@ -634,17 +638,22 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
634 int result = 0; 638 int result = 0;
635 639
636 spin_lock(&filp->f_lock); 640 spin_lock(&filp->f_lock);
637 write_lock_irq(&fasync_lock); 641 spin_lock(&fasync_lock);
638 for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { 642 for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
639 if (fa->fa_file != filp) 643 if (fa->fa_file != filp)
640 continue; 644 continue;
645
646 spin_lock_irq(&fa->fa_lock);
647 fa->fa_file = NULL;
648 spin_unlock_irq(&fa->fa_lock);
649
641 *fp = fa->fa_next; 650 *fp = fa->fa_next;
642 kmem_cache_free(fasync_cache, fa); 651 call_rcu(&fa->fa_rcu, fasync_free_rcu);
643 filp->f_flags &= ~FASYNC; 652 filp->f_flags &= ~FASYNC;
644 result = 1; 653 result = 1;
645 break; 654 break;
646 } 655 }
647 write_unlock_irq(&fasync_lock); 656 spin_unlock(&fasync_lock);
648 spin_unlock(&filp->f_lock); 657 spin_unlock(&filp->f_lock);
649 return result; 658 return result;
650} 659}
@@ -666,25 +675,30 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
666 return -ENOMEM; 675 return -ENOMEM;
667 676
668 spin_lock(&filp->f_lock); 677 spin_lock(&filp->f_lock);
669 write_lock_irq(&fasync_lock); 678 spin_lock(&fasync_lock);
670 for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { 679 for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
671 if (fa->fa_file != filp) 680 if (fa->fa_file != filp)
672 continue; 681 continue;
682
683 spin_lock_irq(&fa->fa_lock);
673 fa->fa_fd = fd; 684 fa->fa_fd = fd;
685 spin_unlock_irq(&fa->fa_lock);
686
674 kmem_cache_free(fasync_cache, new); 687 kmem_cache_free(fasync_cache, new);
675 goto out; 688 goto out;
676 } 689 }
677 690
691 spin_lock_init(&new->fa_lock);
678 new->magic = FASYNC_MAGIC; 692 new->magic = FASYNC_MAGIC;
679 new->fa_file = filp; 693 new->fa_file = filp;
680 new->fa_fd = fd; 694 new->fa_fd = fd;
681 new->fa_next = *fapp; 695 new->fa_next = *fapp;
682 *fapp = new; 696 rcu_assign_pointer(*fapp, new);
683 result = 1; 697 result = 1;
684 filp->f_flags |= FASYNC; 698 filp->f_flags |= FASYNC;
685 699
686out: 700out:
687 write_unlock_irq(&fasync_lock); 701 spin_unlock(&fasync_lock);
688 spin_unlock(&filp->f_lock); 702 spin_unlock(&filp->f_lock);
689 return result; 703 return result;
690} 704}
@@ -704,37 +718,41 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
704 718
705EXPORT_SYMBOL(fasync_helper); 719EXPORT_SYMBOL(fasync_helper);
706 720
707void __kill_fasync(struct fasync_struct *fa, int sig, int band) 721/*
722 * rcu_read_lock() is held
723 */
724static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
708{ 725{
709 while (fa) { 726 while (fa) {
710 struct fown_struct * fown; 727 struct fown_struct *fown;
711 if (fa->magic != FASYNC_MAGIC) { 728 if (fa->magic != FASYNC_MAGIC) {
712 printk(KERN_ERR "kill_fasync: bad magic number in " 729 printk(KERN_ERR "kill_fasync: bad magic number in "
713 "fasync_struct!\n"); 730 "fasync_struct!\n");
714 return; 731 return;
715 } 732 }
716 fown = &fa->fa_file->f_owner; 733 spin_lock(&fa->fa_lock);
717 /* Don't send SIGURG to processes which have not set a 734 if (fa->fa_file) {
718 queued signum: SIGURG has its own default signalling 735 fown = &fa->fa_file->f_owner;
719 mechanism. */ 736 /* Don't send SIGURG to processes which have not set a
720 if (!(sig == SIGURG && fown->signum == 0)) 737 queued signum: SIGURG has its own default signalling
721 send_sigio(fown, fa->fa_fd, band); 738 mechanism. */
722 fa = fa->fa_next; 739 if (!(sig == SIGURG && fown->signum == 0))
740 send_sigio(fown, fa->fa_fd, band);
741 }
742 spin_unlock(&fa->fa_lock);
743 fa = rcu_dereference(fa->fa_next);
723 } 744 }
724} 745}
725 746
726EXPORT_SYMBOL(__kill_fasync);
727
728void kill_fasync(struct fasync_struct **fp, int sig, int band) 747void kill_fasync(struct fasync_struct **fp, int sig, int band)
729{ 748{
730 /* First a quick test without locking: usually 749 /* First a quick test without locking: usually
731 * the list is empty. 750 * the list is empty.
732 */ 751 */
733 if (*fp) { 752 if (*fp) {
734 read_lock(&fasync_lock); 753 rcu_read_lock();
735 /* reread *fp after obtaining the lock */ 754 kill_fasync_rcu(rcu_dereference(*fp), sig, band);
736 __kill_fasync(*fp, sig, band); 755 rcu_read_unlock();
737 read_unlock(&fasync_lock);
738 } 756 }
739} 757}
740EXPORT_SYMBOL(kill_fasync); 758EXPORT_SYMBOL(kill_fasync);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2a3d352c0bff..a8766c4ef2e0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server,
1294 1294
1295 /* Initialise the client representation from the mount data */ 1295 /* Initialise the client representation from the mount data */
1296 server->flags = data->flags; 1296 server->flags = data->flags;
1297 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; 1297 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
1298 NFS_CAP_POSIX_LOCK;
1298 server->options = data->options; 1299 server->options = data->options;
1299 1300
1300 /* Get a client record */ 1301 /* Get a client record */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c6f2750648f4..be46f26c9a56 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1025 res = NULL; 1025 res = NULL;
1026 goto out; 1026 goto out;
1027 /* This turned out not to be a regular file */ 1027 /* This turned out not to be a regular file */
1028 case -EISDIR:
1028 case -ENOTDIR: 1029 case -ENOTDIR:
1029 goto no_open; 1030 goto no_open;
1030 case -ELOOP: 1031 case -ELOOP:
1031 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1032 if (!(nd->intent.open.flags & O_NOFOLLOW))
1032 goto no_open; 1033 goto no_open;
1033 /* case -EISDIR: */
1034 /* case -EINVAL: */ 1034 /* case -EINVAL: */
1035 default: 1035 default:
1036 goto out; 1036 goto out;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 737128f777f3..50a56edca0b5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
623 list_for_each_entry(pos, &nfsi->open_files, list) { 623 list_for_each_entry(pos, &nfsi->open_files, list) {
624 if (cred != NULL && pos->cred != cred) 624 if (cred != NULL && pos->cred != cred)
625 continue; 625 continue;
626 if ((pos->mode & mode) == mode) { 626 if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
627 ctx = get_nfs_open_context(pos); 627 continue;
628 break; 628 ctx = get_nfs_open_context(pos);
629 } 629 break;
630 } 630 }
631 spin_unlock(&inode->i_lock); 631 spin_unlock(&inode->i_lock);
632 return ctx; 632 return ctx;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fe0cd9eb1d4d..638067007c65 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1523 nfs_post_op_update_inode(dir, o_res->dir_attr); 1523 nfs_post_op_update_inode(dir, o_res->dir_attr);
1524 } else 1524 } else
1525 nfs_refresh_inode(dir, o_res->dir_attr); 1525 nfs_refresh_inode(dir, o_res->dir_attr);
1526 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1527 server->caps &= ~NFS_CAP_POSIX_LOCK;
1526 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1528 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1527 status = _nfs4_proc_open_confirm(data); 1529 status = _nfs4_proc_open_confirm(data);
1528 if (status != 0) 1530 if (status != 0)
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
1664 status = PTR_ERR(state); 1666 status = PTR_ERR(state);
1665 if (IS_ERR(state)) 1667 if (IS_ERR(state))
1666 goto err_opendata_put; 1668 goto err_opendata_put;
1667 if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) 1669 if (server->caps & NFS_CAP_POSIX_LOCK)
1668 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1670 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1669 nfs4_opendata_put(opendata); 1671 nfs4_opendata_put(opendata);
1670 nfs4_put_state_owner(sp); 1672 nfs4_put_state_owner(sp);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53ff70e23993..de38d63aa920 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
201 struct inode *inode = page->mapping->host; 201 struct inode *inode = page->mapping->host;
202 struct nfs_server *nfss = NFS_SERVER(inode); 202 struct nfs_server *nfss = NFS_SERVER(inode);
203 203
204 page_cache_get(page);
204 if (atomic_long_inc_return(&nfss->writeback) > 205 if (atomic_long_inc_return(&nfss->writeback) >
205 NFS_CONGESTION_ON_THRESH) { 206 NFS_CONGESTION_ON_THRESH) {
206 set_bdi_congested(&nfss->backing_dev_info, 207 set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
216 struct nfs_server *nfss = NFS_SERVER(inode); 217 struct nfs_server *nfss = NFS_SERVER(inode);
217 218
218 end_page_writeback(page); 219 end_page_writeback(page);
220 page_cache_release(page);
219 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 221 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
220 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 222 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
221} 223}
@@ -421,6 +423,7 @@ static void
421nfs_mark_request_dirty(struct nfs_page *req) 423nfs_mark_request_dirty(struct nfs_page *req)
422{ 424{
423 __set_page_dirty_nobuffers(req->wb_page); 425 __set_page_dirty_nobuffers(req->wb_page);
426 __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
424} 427}
425 428
426#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 429#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
660 req = nfs_setup_write_request(ctx, page, offset, count); 663 req = nfs_setup_write_request(ctx, page, offset, count);
661 if (IS_ERR(req)) 664 if (IS_ERR(req))
662 return PTR_ERR(req); 665 return PTR_ERR(req);
666 nfs_mark_request_dirty(req);
663 /* Update file length */ 667 /* Update file length */
664 nfs_grow_file(page, offset, count); 668 nfs_grow_file(page, offset, count);
665 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); 669 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
670 nfs_mark_request_dirty(req);
666 nfs_clear_page_tag_locked(req); 671 nfs_clear_page_tag_locked(req);
667 return 0; 672 return 0;
668} 673}
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
739 status = nfs_writepage_setup(ctx, page, offset, count); 744 status = nfs_writepage_setup(ctx, page, offset, count);
740 if (status < 0) 745 if (status < 0)
741 nfs_set_pageerror(page); 746 nfs_set_pageerror(page);
742 else
743 __set_page_dirty_nobuffers(page);
744 747
745 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", 748 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
746 status, (long long)i_size_read(inode)); 749 status, (long long)i_size_read(inode));
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
749 752
750static void nfs_writepage_release(struct nfs_page *req) 753static void nfs_writepage_release(struct nfs_page *req)
751{ 754{
755 struct page *page = req->wb_page;
752 756
753 if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { 757 if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
754 nfs_end_page_writeback(req->wb_page);
755 nfs_inode_remove_request(req); 758 nfs_inode_remove_request(req);
756 } else
757 nfs_end_page_writeback(req->wb_page);
758 nfs_clear_page_tag_locked(req); 759 nfs_clear_page_tag_locked(req);
760 nfs_end_page_writeback(page);
759} 761}
760 762
761static int flush_task_priority(int how) 763static int flush_task_priority(int how)
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
779 int how) 781 int how)
780{ 782{
781 struct inode *inode = req->wb_context->path.dentry->d_inode; 783 struct inode *inode = req->wb_context->path.dentry->d_inode;
782 int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
783 int priority = flush_task_priority(how); 784 int priority = flush_task_priority(how);
784 struct rpc_task *task; 785 struct rpc_task *task;
785 struct rpc_message msg = { 786 struct rpc_message msg = {
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
794 .callback_ops = call_ops, 795 .callback_ops = call_ops,
795 .callback_data = data, 796 .callback_data = data,
796 .workqueue = nfsiod_workqueue, 797 .workqueue = nfsiod_workqueue,
797 .flags = flags, 798 .flags = RPC_TASK_ASYNC,
798 .priority = priority, 799 .priority = priority,
799 }; 800 };
801 int ret = 0;
800 802
801 /* Set up the RPC argument and reply structs 803 /* Set up the RPC argument and reply structs
802 * NB: take care not to mess about with data->commit et al. */ 804 * NB: take care not to mess about with data->commit et al. */
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
835 (unsigned long long)data->args.offset); 837 (unsigned long long)data->args.offset);
836 838
837 task = rpc_run_task(&task_setup_data); 839 task = rpc_run_task(&task_setup_data);
838 if (IS_ERR(task)) 840 if (IS_ERR(task)) {
839 return PTR_ERR(task); 841 ret = PTR_ERR(task);
842 goto out;
843 }
844 if (how & FLUSH_SYNC) {
845 ret = rpc_wait_for_completion_task(task);
846 if (ret == 0)
847 ret = task->tk_status;
848 }
840 rpc_put_task(task); 849 rpc_put_task(task);
841 return 0; 850out:
851 return ret;
842} 852}
843 853
844/* If a nfs_flush_* function fails, it should remove reqs from @head and 854/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
847 */ 857 */
848static void nfs_redirty_request(struct nfs_page *req) 858static void nfs_redirty_request(struct nfs_page *req)
849{ 859{
860 struct page *page = req->wb_page;
861
850 nfs_mark_request_dirty(req); 862 nfs_mark_request_dirty(req);
851 nfs_end_page_writeback(req->wb_page);
852 nfs_clear_page_tag_locked(req); 863 nfs_clear_page_tag_locked(req);
864 nfs_end_page_writeback(page);
853} 865}
854 866
855/* 867/*
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
1084 if (nfs_write_need_commit(data)) { 1096 if (nfs_write_need_commit(data)) {
1085 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); 1097 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1086 nfs_mark_request_commit(req); 1098 nfs_mark_request_commit(req);
1087 nfs_end_page_writeback(page);
1088 dprintk(" marked for commit\n"); 1099 dprintk(" marked for commit\n");
1089 goto next; 1100 goto next;
1090 } 1101 }
1091 dprintk(" OK\n"); 1102 dprintk(" OK\n");
1092remove_request: 1103remove_request:
1093 nfs_end_page_writeback(page);
1094 nfs_inode_remove_request(req); 1104 nfs_inode_remove_request(req);
1095 next: 1105 next:
1096 nfs_clear_page_tag_locked(req); 1106 nfs_clear_page_tag_locked(req);
1107 nfs_end_page_writeback(page);
1097 } 1108 }
1098 nfs_writedata_release(calldata); 1109 nfs_writedata_release(calldata);
1099} 1110}
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1207{ 1218{
1208 struct nfs_page *first = nfs_list_entry(head->next); 1219 struct nfs_page *first = nfs_list_entry(head->next);
1209 struct inode *inode = first->wb_context->path.dentry->d_inode; 1220 struct inode *inode = first->wb_context->path.dentry->d_inode;
1210 int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1211 int priority = flush_task_priority(how); 1221 int priority = flush_task_priority(how);
1212 struct rpc_task *task; 1222 struct rpc_task *task;
1213 struct rpc_message msg = { 1223 struct rpc_message msg = {
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1222 .callback_ops = &nfs_commit_ops, 1232 .callback_ops = &nfs_commit_ops,
1223 .callback_data = data, 1233 .callback_data = data,
1224 .workqueue = nfsiod_workqueue, 1234 .workqueue = nfsiod_workqueue,
1225 .flags = flags, 1235 .flags = RPC_TASK_ASYNC,
1226 .priority = priority, 1236 .priority = priority,
1227 }; 1237 };
1228 1238
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1252 task = rpc_run_task(&task_setup_data); 1262 task = rpc_run_task(&task_setup_data);
1253 if (IS_ERR(task)) 1263 if (IS_ERR(task))
1254 return PTR_ERR(task); 1264 return PTR_ERR(task);
1265 if (how & FLUSH_SYNC)
1266 rpc_wait_for_completion_task(task);
1255 rpc_put_task(task); 1267 rpc_put_task(task);
1256 return 0; 1268 return 0;
1257} 1269}
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 8d6356a804f3..7cfb87e692da 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -426,7 +426,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
426 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); 426 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
427 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), 427 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
428 group_offset, bitmap)) 428 group_offset, bitmap))
429 printk(KERN_WARNING "%s: entry numer %llu already freed\n", 429 printk(KERN_WARNING "%s: entry number %llu already freed\n",
430 __func__, (unsigned long long)req->pr_entry_nr); 430 __func__, (unsigned long long)req->pr_entry_nr);
431 431
432 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); 432 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7cdd98b8d514..76c38e3e19d2 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1879,7 +1879,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
1879 struct nilfs_btree_path *path, 1879 struct nilfs_btree_path *path,
1880 int level, struct buffer_head *bh) 1880 int level, struct buffer_head *bh)
1881{ 1881{
1882 int maxlevel, ret; 1882 int maxlevel = 0, ret;
1883 struct nilfs_btree_node *parent; 1883 struct nilfs_btree_node *parent;
1884 struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); 1884 struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
1885 __u64 ptr; 1885 __u64 ptr;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index c2ff1b306012..f90a33d9a5b0 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -649,7 +649,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
649long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 649long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
650{ 650{
651 struct inode *inode = filp->f_dentry->d_inode; 651 struct inode *inode = filp->f_dentry->d_inode;
652 void __user *argp = (void * __user *)arg; 652 void __user *argp = (void __user *)arg;
653 653
654 switch (cmd) { 654 switch (cmd) {
655 case NILFS_IOCTL_CHANGE_CPMODE: 655 case NILFS_IOCTL_CHANGE_CPMODE:
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index dad7fb247ddc..3e21b1e2ad3a 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING
33 Note that this behavior is currently deprecated and may go away in 33 Note that this behavior is currently deprecated and may go away in
34 future. Please use notification via netlink socket instead. 34 future. Please use notification via netlink socket instead.
35 35
36config QUOTA_DEBUG
37 bool "Additional quota sanity checks"
38 depends on QUOTA
39 default n
40 help
41 If you say Y here, quota subsystem will perform some additional
42 sanity checks of quota internal structures. If unsure, say N.
43
36# Generic support for tree structured quota files. Selected when needed. 44# Generic support for tree structured quota files. Selected when needed.
37config QUOTA_TREE 45config QUOTA_TREE
38 tristate 46 tristate
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index e0b870f4749f..788b5802a7ce 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -80,8 +80,6 @@
80 80
81#include <asm/uaccess.h> 81#include <asm/uaccess.h>
82 82
83#define __DQUOT_PARANOIA
84
85/* 83/*
86 * There are three quota SMP locks. dq_list_lock protects all lists with quotas 84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
87 * and quota formats, dqstats structure containing statistics about the lists 85 * and quota formats, dqstats structure containing statistics about the lists
@@ -695,7 +693,7 @@ void dqput(struct dquot *dquot)
695 693
696 if (!dquot) 694 if (!dquot)
697 return; 695 return;
698#ifdef __DQUOT_PARANOIA 696#ifdef CONFIG_QUOTA_DEBUG
699 if (!atomic_read(&dquot->dq_count)) { 697 if (!atomic_read(&dquot->dq_count)) {
700 printk("VFS: dqput: trying to free free dquot\n"); 698 printk("VFS: dqput: trying to free free dquot\n");
701 printk("VFS: device %s, dquot of %s %d\n", 699 printk("VFS: device %s, dquot of %s %d\n",
@@ -748,7 +746,7 @@ we_slept:
748 goto we_slept; 746 goto we_slept;
749 } 747 }
750 atomic_dec(&dquot->dq_count); 748 atomic_dec(&dquot->dq_count);
751#ifdef __DQUOT_PARANOIA 749#ifdef CONFIG_QUOTA_DEBUG
752 /* sanity check */ 750 /* sanity check */
753 BUG_ON(!list_empty(&dquot->dq_free)); 751 BUG_ON(!list_empty(&dquot->dq_free));
754#endif 752#endif
@@ -845,7 +843,7 @@ we_slept:
845 dquot = NULL; 843 dquot = NULL;
846 goto out; 844 goto out;
847 } 845 }
848#ifdef __DQUOT_PARANOIA 846#ifdef CONFIG_QUOTA_DEBUG
849 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ 847 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
850#endif 848#endif
851out: 849out:
@@ -874,14 +872,18 @@ static int dqinit_needed(struct inode *inode, int type)
874static void add_dquot_ref(struct super_block *sb, int type) 872static void add_dquot_ref(struct super_block *sb, int type)
875{ 873{
876 struct inode *inode, *old_inode = NULL; 874 struct inode *inode, *old_inode = NULL;
875#ifdef CONFIG_QUOTA_DEBUG
877 int reserved = 0; 876 int reserved = 0;
877#endif
878 878
879 spin_lock(&inode_lock); 879 spin_lock(&inode_lock);
880 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 880 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
881 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 881 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
882 continue; 882 continue;
883#ifdef CONFIG_QUOTA_DEBUG
883 if (unlikely(inode_get_rsv_space(inode) > 0)) 884 if (unlikely(inode_get_rsv_space(inode) > 0))
884 reserved = 1; 885 reserved = 1;
886#endif
885 if (!atomic_read(&inode->i_writecount)) 887 if (!atomic_read(&inode->i_writecount))
886 continue; 888 continue;
887 if (!dqinit_needed(inode, type)) 889 if (!dqinit_needed(inode, type))
@@ -903,11 +905,13 @@ static void add_dquot_ref(struct super_block *sb, int type)
903 spin_unlock(&inode_lock); 905 spin_unlock(&inode_lock);
904 iput(old_inode); 906 iput(old_inode);
905 907
908#ifdef CONFIG_QUOTA_DEBUG
906 if (reserved) { 909 if (reserved) {
907 printk(KERN_WARNING "VFS (%s): Writes happened before quota" 910 printk(KERN_WARNING "VFS (%s): Writes happened before quota"
908 " was turned on thus quota information is probably " 911 " was turned on thus quota information is probably "
909 "inconsistent. Please run quotacheck(8).\n", sb->s_id); 912 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
910 } 913 }
914#endif
911} 915}
912 916
913/* 917/*
@@ -934,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
934 inode->i_dquot[type] = NULL; 938 inode->i_dquot[type] = NULL;
935 if (dquot) { 939 if (dquot) {
936 if (dqput_blocks(dquot)) { 940 if (dqput_blocks(dquot)) {
937#ifdef __DQUOT_PARANOIA 941#ifdef CONFIG_QUOTA_DEBUG
938 if (atomic_read(&dquot->dq_count) != 1) 942 if (atomic_read(&dquot->dq_count) != 1)
939 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); 943 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
940#endif 944#endif
@@ -2322,34 +2326,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2322 if (di->dqb_valid & QIF_SPACE) { 2326 if (di->dqb_valid & QIF_SPACE) {
2323 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; 2327 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
2324 check_blim = 1; 2328 check_blim = 1;
2325 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2329 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2326 } 2330 }
2327 if (di->dqb_valid & QIF_BLIMITS) { 2331 if (di->dqb_valid & QIF_BLIMITS) {
2328 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); 2332 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
2329 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); 2333 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
2330 check_blim = 1; 2334 check_blim = 1;
2331 __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); 2335 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2332 } 2336 }
2333 if (di->dqb_valid & QIF_INODES) { 2337 if (di->dqb_valid & QIF_INODES) {
2334 dm->dqb_curinodes = di->dqb_curinodes; 2338 dm->dqb_curinodes = di->dqb_curinodes;
2335 check_ilim = 1; 2339 check_ilim = 1;
2336 __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); 2340 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2337 } 2341 }
2338 if (di->dqb_valid & QIF_ILIMITS) { 2342 if (di->dqb_valid & QIF_ILIMITS) {
2339 dm->dqb_isoftlimit = di->dqb_isoftlimit; 2343 dm->dqb_isoftlimit = di->dqb_isoftlimit;
2340 dm->dqb_ihardlimit = di->dqb_ihardlimit; 2344 dm->dqb_ihardlimit = di->dqb_ihardlimit;
2341 check_ilim = 1; 2345 check_ilim = 1;
2342 __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); 2346 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2343 } 2347 }
2344 if (di->dqb_valid & QIF_BTIME) { 2348 if (di->dqb_valid & QIF_BTIME) {
2345 dm->dqb_btime = di->dqb_btime; 2349 dm->dqb_btime = di->dqb_btime;
2346 check_blim = 1; 2350 check_blim = 1;
2347 __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2351 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2348 } 2352 }
2349 if (di->dqb_valid & QIF_ITIME) { 2353 if (di->dqb_valid & QIF_ITIME) {
2350 dm->dqb_itime = di->dqb_itime; 2354 dm->dqb_itime = di->dqb_itime;
2351 check_ilim = 1; 2355 check_ilim = 1;
2352 __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2356 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2353 } 2357 }
2354 2358
2355 if (check_blim) { 2359 if (check_blim) {
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 19626e2491c4..9a9378b4eb5a 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -125,9 +125,8 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
125 125
126 mutex_lock(&sbi->s_alloc_mutex); 126 mutex_lock(&sbi->s_alloc_mutex);
127 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; 127 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
128 if (bloc->logicalBlockNum < 0 || 128 if (bloc->logicalBlockNum + count < count ||
129 (bloc->logicalBlockNum + count) > 129 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
130 partmap->s_partition_len) {
131 udf_debug("%d < %d || %d + %d > %d\n", 130 udf_debug("%d < %d || %d + %d > %d\n",
132 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, 131 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
133 count, partmap->s_partition_len); 132 count, partmap->s_partition_len);
@@ -393,9 +392,8 @@ static void udf_table_free_blocks(struct super_block *sb,
393 392
394 mutex_lock(&sbi->s_alloc_mutex); 393 mutex_lock(&sbi->s_alloc_mutex);
395 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; 394 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
396 if (bloc->logicalBlockNum < 0 || 395 if (bloc->logicalBlockNum + count < count ||
397 (bloc->logicalBlockNum + count) > 396 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
398 partmap->s_partition_len) {
399 udf_debug("%d < %d || %d + %d > %d\n", 397 udf_debug("%d < %d || %d + %d > %d\n",
400 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, 398 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
401 partmap->s_partition_len); 399 partmap->s_partition_len);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1eb06774ed90..4b6a46ccbf46 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -218,7 +218,7 @@ const struct file_operations udf_file_operations = {
218 .llseek = generic_file_llseek, 218 .llseek = generic_file_llseek,
219}; 219};
220 220
221static int udf_setattr(struct dentry *dentry, struct iattr *iattr) 221int udf_setattr(struct dentry *dentry, struct iattr *iattr)
222{ 222{
223 struct inode *inode = dentry->d_inode; 223 struct inode *inode = dentry->d_inode;
224 int error; 224 int error;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index bb863fe579ac..8a3fbd177cab 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1314,7 +1314,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1314 break; 1314 break;
1315 case ICBTAG_FILE_TYPE_SYMLINK: 1315 case ICBTAG_FILE_TYPE_SYMLINK:
1316 inode->i_data.a_ops = &udf_symlink_aops; 1316 inode->i_data.a_ops = &udf_symlink_aops;
1317 inode->i_op = &page_symlink_inode_operations; 1317 inode->i_op = &udf_symlink_inode_operations;
1318 inode->i_mode = S_IFLNK | S_IRWXUGO; 1318 inode->i_mode = S_IFLNK | S_IRWXUGO;
1319 break; 1319 break;
1320 case ICBTAG_FILE_TYPE_MAIN: 1320 case ICBTAG_FILE_TYPE_MAIN:
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index db423ab078b1..75816025f95f 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -925,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
925 iinfo = UDF_I(inode); 925 iinfo = UDF_I(inode);
926 inode->i_mode = S_IFLNK | S_IRWXUGO; 926 inode->i_mode = S_IFLNK | S_IRWXUGO;
927 inode->i_data.a_ops = &udf_symlink_aops; 927 inode->i_data.a_ops = &udf_symlink_aops;
928 inode->i_op = &page_symlink_inode_operations; 928 inode->i_op = &udf_symlink_inode_operations;
929 929
930 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 930 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
931 struct kernel_lb_addr eloc; 931 struct kernel_lb_addr eloc;
@@ -1393,6 +1393,7 @@ const struct export_operations udf_export_ops = {
1393const struct inode_operations udf_dir_inode_operations = { 1393const struct inode_operations udf_dir_inode_operations = {
1394 .lookup = udf_lookup, 1394 .lookup = udf_lookup,
1395 .create = udf_create, 1395 .create = udf_create,
1396 .setattr = udf_setattr,
1396 .link = udf_link, 1397 .link = udf_link,
1397 .unlink = udf_unlink, 1398 .unlink = udf_unlink,
1398 .symlink = udf_symlink, 1399 .symlink = udf_symlink,
@@ -1401,3 +1402,9 @@ const struct inode_operations udf_dir_inode_operations = {
1401 .mknod = udf_mknod, 1402 .mknod = udf_mknod,
1402 .rename = udf_rename, 1403 .rename = udf_rename,
1403}; 1404};
1405const struct inode_operations udf_symlink_inode_operations = {
1406 .readlink = generic_readlink,
1407 .follow_link = page_follow_link_light,
1408 .put_link = page_put_link,
1409 .setattr = udf_setattr,
1410};
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 4223ac855da9..702a1148e702 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -76,6 +76,7 @@ extern const struct inode_operations udf_dir_inode_operations;
76extern const struct file_operations udf_dir_operations; 76extern const struct file_operations udf_dir_operations;
77extern const struct inode_operations udf_file_inode_operations; 77extern const struct inode_operations udf_file_inode_operations;
78extern const struct file_operations udf_file_operations; 78extern const struct file_operations udf_file_operations;
79extern const struct inode_operations udf_symlink_inode_operations;
79extern const struct address_space_operations udf_aops; 80extern const struct address_space_operations udf_aops;
80extern const struct address_space_operations udf_adinicb_aops; 81extern const struct address_space_operations udf_adinicb_aops;
81extern const struct address_space_operations udf_symlink_aops; 82extern const struct address_space_operations udf_symlink_aops;
@@ -131,7 +132,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
131/* file.c */ 132/* file.c */
132extern int udf_ioctl(struct inode *, struct file *, unsigned int, 133extern int udf_ioctl(struct inode *, struct file *, unsigned int,
133 unsigned long); 134 unsigned long);
134 135extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
135/* inode.c */ 136/* inode.c */
136extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); 137extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
137extern int udf_sync_inode(struct inode *); 138extern int udf_sync_inode(struct inode *);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 05cd85317f6f..fd9698215759 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -820,10 +820,10 @@ xfs_reclaim_inode(
820 * call into reclaim to find it in a clean state instead of waiting for 820 * call into reclaim to find it in a clean state instead of waiting for
821 * it now. We also don't return errors here - if the error is transient 821 * it now. We also don't return errors here - if the error is transient
822 * then the next reclaim pass will flush the inode, and if the error 822 * then the next reclaim pass will flush the inode, and if the error
823 * is permanent then the next sync reclaim will relcaim the inode and 823 * is permanent then the next sync reclaim will reclaim the inode and
824 * pass on the error. 824 * pass on the error.
825 */ 825 */
826 if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { 826 if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
827 xfs_fs_cmn_err(CE_WARN, ip->i_mount, 827 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
828 "inode 0x%llx background reclaim flush failed with %d", 828 "inode 0x%llx background reclaim flush failed with %d",
829 (long long)ip->i_ino, error); 829 (long long)ip->i_ino, error);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e8fba92d7cd9..2be019136287 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp,
745 745
746/* 746/*
747 * Determine if we have a transaction that has gone to disk 747 * Determine if we have a transaction that has gone to disk
748 * that needs to be covered. Log activity needs to be idle (no AIL and 748 * that needs to be covered. To begin the transition to the idle state
749 * nothing in the iclogs). And, we need to be in the right state indicating 749 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
750 * something has gone out. 750 * If we are then in a state where covering is needed, the caller is informed
751 * that dummy transactions are required to move the log into the idle state.
752 *
753 * Because this is called as part of the sync process, we should also indicate
754 * that dummy transactions should be issued in anything but the covered or
755 * idle states. This ensures that the log tail is accurately reflected in
756 * the log at the end of the sync, hence if a crash occurrs avoids replay
757 * of transactions where the metadata is already on disk.
751 */ 758 */
752int 759int
753xfs_log_need_covered(xfs_mount_t *mp) 760xfs_log_need_covered(xfs_mount_t *mp)
@@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp)
759 return 0; 766 return 0;
760 767
761 spin_lock(&log->l_icloglock); 768 spin_lock(&log->l_icloglock);
762 if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || 769 switch (log->l_covered_state) {
763 (log->l_covered_state == XLOG_STATE_COVER_NEED2)) 770 case XLOG_STATE_COVER_DONE:
764 && !xfs_trans_ail_tail(log->l_ailp) 771 case XLOG_STATE_COVER_DONE2:
765 && xlog_iclogs_empty(log)) { 772 case XLOG_STATE_COVER_IDLE:
766 if (log->l_covered_state == XLOG_STATE_COVER_NEED) 773 break;
767 log->l_covered_state = XLOG_STATE_COVER_DONE; 774 case XLOG_STATE_COVER_NEED:
768 else { 775 case XLOG_STATE_COVER_NEED2:
769 ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); 776 if (!xfs_trans_ail_tail(log->l_ailp) &&
770 log->l_covered_state = XLOG_STATE_COVER_DONE2; 777 xlog_iclogs_empty(log)) {
778 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
779 log->l_covered_state = XLOG_STATE_COVER_DONE;
780 else
781 log->l_covered_state = XLOG_STATE_COVER_DONE2;
771 } 782 }
783 /* FALLTHRU */
784 default:
772 needed = 1; 785 needed = 1;
786 break;
773 } 787 }
774 spin_unlock(&log->l_icloglock); 788 spin_unlock(&log->l_icloglock);
775 return needed; 789 return needed;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 04a6ebc27b96..2d428b088cc8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 10 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 11 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ 12 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e2ea0b2159cd..2fc8e14cc24a 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -94,6 +94,7 @@ header-y += if_ppp.h
94header-y += if_slip.h 94header-y += if_slip.h
95header-y += if_strip.h 95header-y += if_strip.h
96header-y += if_tun.h 96header-y += if_tun.h
97header-y += if_x25.h
97header-y += in_route.h 98header-y += in_route.h
98header-y += ioctl.h 99header-y += ioctl.h
99header-y += ip6_tunnel.h 100header-y += ip6_tunnel.h
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index 8e5c8444a3f4..2a61eb1beb85 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -16,7 +16,6 @@
16#include <sys/socket.h> 16#include <sys/socket.h>
17#endif 17#endif
18 18
19
20/** 19/**
21 * enum caif_link_selector - Physical Link Selection. 20 * enum caif_link_selector - Physical Link Selection.
22 * @CAIF_LINK_HIGH_BANDW: Physical interface for high-bandwidth 21 * @CAIF_LINK_HIGH_BANDW: Physical interface for high-bandwidth
@@ -59,7 +58,7 @@ enum caif_channel_priority {
59/** 58/**
60 * enum caif_protocol_type - CAIF Channel type. 59 * enum caif_protocol_type - CAIF Channel type.
61 * @CAIFPROTO_AT: Classic AT channel. 60 * @CAIFPROTO_AT: Classic AT channel.
62 * @CAIFPROTO_DATAGRAM: Datagram channel. 61 * @CAIFPROTO_DATAGRAM: Datagram channel.
63 * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing. 62 * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
64 * @CAIFPROTO_UTIL: Utility (Psock) channel. 63 * @CAIFPROTO_UTIL: Utility (Psock) channel.
65 * @CAIFPROTO_RFM: Remote File Manager 64 * @CAIFPROTO_RFM: Remote File Manager
@@ -87,6 +86,7 @@ enum caif_at_type {
87 86
88/** 87/**
89 * struct sockaddr_caif - the sockaddr structure for CAIF sockets. 88 * struct sockaddr_caif - the sockaddr structure for CAIF sockets.
89 * @family: Address family number, must be AF_CAIF.
90 * @u: Union of address data 'switched' by family. 90 * @u: Union of address data 'switched' by family.
91 * : 91 * :
92 * @u.at: Applies when family = CAIFPROTO_AT. 92 * @u.at: Applies when family = CAIFPROTO_AT.
@@ -153,6 +153,7 @@ struct sockaddr_caif {
153 * 153 *
154 * 154 *
155 * This enum defines the CAIF Socket options to be used on a socket 155 * This enum defines the CAIF Socket options to be used on a socket
156 * of type PF_CAIF.
156 * 157 *
157 */ 158 */
158enum caif_socket_opts { 159enum caif_socket_opts {
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index b7cdbb4373df..8723491f7dfd 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -22,8 +22,6 @@
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24 24
25#define DCB_PROTO_VERSION 1
26
27struct dcbmsg { 25struct dcbmsg {
28 __u8 dcb_family; 26 __u8 dcb_family;
29 __u8 cmd; 27 __u8 cmd;
diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h
index 04a397619ebe..51da65b68b85 100644
--- a/include/linux/fib_rules.h
+++ b/include/linux/fib_rules.h
@@ -15,14 +15,6 @@
15/* try to find source address in routing lookups */ 15/* try to find source address in routing lookups */
16#define FIB_RULE_FIND_SADDR 0x00010000 16#define FIB_RULE_FIND_SADDR 0x00010000
17 17
18/* fib_rules families. values up to 127 are reserved for real address
19 * families, values above 128 may be used arbitrarily.
20 */
21#define FIB_RULES_IPV4 AF_INET
22#define FIB_RULES_IPV6 AF_INET6
23#define FIB_RULES_DECNET AF_DECnet
24#define FIB_RULES_IPMR 128
25
26struct fib_rule_hdr { 18struct fib_rule_hdr {
27 __u8 family; 19 __u8 family;
28 __u8 dst_len; 20 __u8 dst_len;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 29a0e3db9f43..151f5d703b7e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -123,7 +123,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
123#define SKF_AD_NLATTR_NEST 16 123#define SKF_AD_NLATTR_NEST 16
124#define SKF_AD_MARK 20 124#define SKF_AD_MARK 20
125#define SKF_AD_QUEUE 24 125#define SKF_AD_QUEUE 24
126#define SKF_AD_MAX 28 126#define SKF_AD_HATYPE 28
127#define SKF_AD_MAX 32
127#define SKF_NET_OFF (-0x100000) 128#define SKF_NET_OFF (-0x100000)
128#define SKF_LL_OFF (-0x200000) 129#define SKF_LL_OFF (-0x200000)
129 130
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 40b11013408e..81f3b14d5d76 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -1,21 +1,26 @@
1/* 1/*
2 * Char device interface. 2 * Char device interface.
3 * 3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * it under the terms of the GNU General Public License as published by 7 * copy of this software and associated documentation files (the "Software"),
8 * the Free Software Foundation; either version 2 of the License, or 8 * to deal in the Software without restriction, including without limitation
9 * (at your option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * This program is distributed in the hope that it will be useful, 11 * Software is furnished to do so, subject to the following conditions:
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * The above copyright notice and this permission notice (including the next
14 * GNU General Public License for more details. 14 * paragraph) shall be included in all copies or substantial portions of the
15 * 15 * Software.
16 * You should have received a copy of the GNU General Public License 16 *
17 * along with this program; if not, write to the Free Software Foundation, 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
19 */ 24 */
20 25
21#ifndef _LINUX_FIREWIRE_CDEV_H 26#ifndef _LINUX_FIREWIRE_CDEV_H
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor {
438 * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE 443 * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
439 * @header_size: Header size to strip for receive contexts 444 * @header_size: Header size to strip for receive contexts
440 * @channel: Channel to bind to 445 * @channel: Channel to bind to
441 * @speed: Speed to transmit at 446 * @speed: Speed for transmit contexts
442 * @closure: To be returned in &fw_cdev_event_iso_interrupt 447 * @closure: To be returned in &fw_cdev_event_iso_interrupt
443 * @handle: Handle to context, written back by kernel 448 * @handle: Handle to context, written back by kernel
444 * 449 *
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor {
451 * If a context was successfully created, the kernel writes back a handle to the 456 * If a context was successfully created, the kernel writes back a handle to the
452 * context, which must be passed in for subsequent operations on that context. 457 * context, which must be passed in for subsequent operations on that context.
453 * 458 *
459 * For receive contexts, @header_size must be at least 4 and must be a multiple
460 * of 4.
461 *
454 * Note that the effect of a @header_size > 4 depends on 462 * Note that the effect of a @header_size > 4 depends on
455 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. 463 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
456 */ 464 */
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context {
481 * 489 *
482 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. 490 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
483 * 491 *
484 * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are 492 * Use the FW_CDEV_ISO_ macros to fill in @control.
485 * specified by IEEE 1394a and IEC 61883. 493 *
486 * 494 * For transmit packets, the header length must be a multiple of 4 and specifies
487 * FIXME - finish this documentation 495 * the numbers of bytes in @header that will be prepended to the packet's
496 * payload; these bytes are copied into the kernel and will not be accessed
497 * after the ioctl has returned. The sy and tag fields are copied to the iso
498 * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
499 * The skip flag specifies that no packet is to be sent in a frame; when using
500 * this, all other fields except the interrupt flag must be zero.
501 *
502 * For receive packets, the header length must be a multiple of the context's
503 * header size; if the header length is larger than the context's header size,
504 * multiple packets are queued for this entry. The sy and tag fields are
505 * ignored. If the sync flag is set, the context drops all packets until
506 * a packet with a matching sy field is received (the sync value to wait for is
507 * specified in the &fw_cdev_start_iso structure). The payload length defines
508 * how many payload bytes can be received for one packet (in addition to payload
509 * quadlets that have been defined as headers and are stripped and returned in
510 * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the
511 * additional bytes are dropped. If less bytes are received, the remaining
512 * bytes in this part of the payload buffer will not be written to, not even by
513 * the next packet, i.e., packets received in consecutive frames will not
514 * necessarily be consecutive in memory. If an entry has queued multiple
515 * packets, the payload length is divided equally among them.
516 *
517 * When a packet with the interrupt flag set has been completed, the
518 * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
519 * multiple receive packets is completed when its last packet is completed.
488 */ 520 */
489struct fw_cdev_iso_packet { 521struct fw_cdev_iso_packet {
490 __u32 control; 522 __u32 control;
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet {
501 * Queue a number of isochronous packets for reception or transmission. 533 * Queue a number of isochronous packets for reception or transmission.
502 * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, 534 * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
503 * which describe how to transmit from or receive into a contiguous region 535 * which describe how to transmit from or receive into a contiguous region
504 * of a mmap()'ed payload buffer. As part of the packet descriptors, 536 * of a mmap()'ed payload buffer. As part of transmit packet descriptors,
505 * a series of headers can be supplied, which will be prepended to the 537 * a series of headers can be supplied, which will be prepended to the
506 * payload during DMA. 538 * payload during DMA.
507 * 539 *
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 {
620 * instead of allocated. 652 * instead of allocated.
621 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. 653 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
622 * 654 *
623 * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources 655 * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
624 * for the lifetime of the fd or handle. 656 * for the lifetime of the fd or @handle.
625 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources 657 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
626 * for the duration of a bus generation. 658 * for the duration of a bus generation.
627 * 659 *
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
index b316770a43fd..9c63f06e67f2 100644
--- a/include/linux/firewire-constants.h
+++ b/include/linux/firewire-constants.h
@@ -1,3 +1,28 @@
1/*
2 * IEEE 1394 constants.
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
1#ifndef _LINUX_FIREWIRE_CONSTANTS_H 26#ifndef _LINUX_FIREWIRE_CONSTANTS_H
2#define _LINUX_FIREWIRE_CONSTANTS_H 27#define _LINUX_FIREWIRE_CONSTANTS_H
3 28
@@ -21,7 +46,7 @@
21#define EXTCODE_WRAP_ADD 0x6 46#define EXTCODE_WRAP_ADD 0x6
22#define EXTCODE_VENDOR_DEPENDENT 0x7 47#define EXTCODE_VENDOR_DEPENDENT 0x7
23 48
24/* Juju specific tcodes */ 49/* Linux firewire-core (Juju) specific tcodes */
25#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) 50#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP)
26#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) 51#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP)
27#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) 52#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD)
@@ -36,7 +61,7 @@
36#define RCODE_TYPE_ERROR 0x6 61#define RCODE_TYPE_ERROR 0x6
37#define RCODE_ADDRESS_ERROR 0x7 62#define RCODE_ADDRESS_ERROR 0x7
38 63
39/* Juju specific rcodes */ 64/* Linux firewire-core (Juju) specific rcodes */
40#define RCODE_SEND_ERROR 0x10 65#define RCODE_SEND_ERROR 0x10
41#define RCODE_CANCELLED 0x11 66#define RCODE_CANCELLED 0x11
42#define RCODE_BUSY 0x12 67#define RCODE_BUSY 0x12
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 39d57bc6cc71..018d382f6f92 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1280,10 +1280,12 @@ static inline int lock_may_write(struct inode *inode, loff_t start,
1280 1280
1281 1281
1282struct fasync_struct { 1282struct fasync_struct {
1283 int magic; 1283 spinlock_t fa_lock;
1284 int fa_fd; 1284 int magic;
1285 struct fasync_struct *fa_next; /* singly linked list */ 1285 int fa_fd;
1286 struct file *fa_file; 1286 struct fasync_struct *fa_next; /* singly linked list */
1287 struct file *fa_file;
1288 struct rcu_head fa_rcu;
1287}; 1289};
1288 1290
1289#define FASYNC_MAGIC 0x4601 1291#define FASYNC_MAGIC 0x4601
@@ -1292,8 +1294,6 @@ struct fasync_struct {
1292extern int fasync_helper(int, struct file *, int, struct fasync_struct **); 1294extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
1293/* can be called from interrupts */ 1295/* can be called from interrupts */
1294extern void kill_fasync(struct fasync_struct **, int, int); 1296extern void kill_fasync(struct fasync_struct **, int, int);
1295/* only for net: no internal synchronization */
1296extern void __kill_fasync(struct fasync_struct *, int, int);
1297 1297
1298extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); 1298extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1299extern int f_setown(struct file *filp, unsigned long arg, int force); 1299extern int f_setown(struct file *filp, unsigned long arg, int force);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 1252ba1fbff5..97b2eae6a22c 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -876,6 +876,7 @@ struct ieee80211_ht_cap {
876#define IEEE80211_HT_CAP_SGI_40 0x0040 876#define IEEE80211_HT_CAP_SGI_40 0x0040
877#define IEEE80211_HT_CAP_TX_STBC 0x0080 877#define IEEE80211_HT_CAP_TX_STBC 0x0080
878#define IEEE80211_HT_CAP_RX_STBC 0x0300 878#define IEEE80211_HT_CAP_RX_STBC 0x0300
879#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
879#define IEEE80211_HT_CAP_DELAY_BA 0x0400 880#define IEEE80211_HT_CAP_DELAY_BA 0x0400
880#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 881#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
881#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 882#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
diff --git a/include/linux/if.h b/include/linux/if.h
index 3a9f410a296b..be350e62a905 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -71,6 +71,8 @@
71 * release skb->dst 71 * release skb->dst
72 */ 72 */
73#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */ 73#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
74#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
75#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
74 76
75#define IF_GET_IFACE 0x0001 /* for querying only */ 77#define IF_GET_IFACE 0x0001 /* for querying only */
76#define IF_GET_PROTO 0x0002 78#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 1350a246893a..06b1829731fd 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -51,6 +51,8 @@
51#define TUNSETSNDBUF _IOW('T', 212, int) 51#define TUNSETSNDBUF _IOW('T', 212, int)
52#define TUNATTACHFILTER _IOW('T', 213, struct sock_fprog) 52#define TUNATTACHFILTER _IOW('T', 213, struct sock_fprog)
53#define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog) 53#define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog)
54#define TUNGETVNETHDRSZ _IOR('T', 215, int)
55#define TUNSETVNETHDRSZ _IOW('T', 216, int)
54 56
55/* TUNSETIFF ifr flags */ 57/* TUNSETIFF ifr flags */
56#define IFF_TUN 0x0001 58#define IFF_TUN 0x0001
diff --git a/include/linux/if_x25.h b/include/linux/if_x25.h
new file mode 100644
index 000000000000..897765f5feb8
--- /dev/null
+++ b/include/linux/if_x25.h
@@ -0,0 +1,26 @@
1/*
2 * Linux X.25 packet to device interface
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _IF_X25_H
16#define _IF_X25_H
17
18#include <linux/types.h>
19
20/* Documentation/networking/x25-iface.txt */
21#define X25_IFACE_DATA 0x00
22#define X25_IFACE_CONNECT 0x01
23#define X25_IFACE_DISCONNECT 0x02
24#define X25_IFACE_PARAMS 0x03
25
26#endif /* _IF_X25_H */
diff --git a/include/linux/in6.h b/include/linux/in6.h
index bd55c6e46b2e..c4bf46f764bf 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -221,10 +221,10 @@ struct in6_flowlabel_req {
221#define IPV6_RTHDR 57 221#define IPV6_RTHDR 57
222#define IPV6_RECVDSTOPTS 58 222#define IPV6_RECVDSTOPTS 58
223#define IPV6_DSTOPTS 59 223#define IPV6_DSTOPTS 59
224#if 0 /* not yet */
225#define IPV6_RECVPATHMTU 60 224#define IPV6_RECVPATHMTU 60
226#define IPV6_PATHMTU 61 225#define IPV6_PATHMTU 61
227#define IPV6_DONTFRAG 62 226#define IPV6_DONTFRAG 62
227#if 0 /* not yet */
228#define IPV6_USE_MIN_MTU 63 228#define IPV6_USE_MIN_MTU 63
229#endif 229#endif
230 230
@@ -265,6 +265,9 @@ struct in6_flowlabel_req {
265#define IPV6_PREFER_SRC_CGA 0x0008 265#define IPV6_PREFER_SRC_CGA 0x0008
266#define IPV6_PREFER_SRC_NONCGA 0x0800 266#define IPV6_PREFER_SRC_NONCGA 0x0800
267 267
268/* RFC5082: Generalized Ttl Security Mechanism */
269#define IPV6_MINHOPCOUNT 73
270
268/* 271/*
269 * Multicast Routing: 272 * Multicast Routing:
270 * see include/linux/mroute6.h. 273 * see include/linux/mroute6.h.
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 3bd018baae20..c964cd7f436a 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -44,6 +44,7 @@ struct matrix_keymap_data {
44 * @active_low: gpio polarity 44 * @active_low: gpio polarity
45 * @wakeup: controls whether the device should be set up as wakeup 45 * @wakeup: controls whether the device should be set up as wakeup
46 * source 46 * source
47 * @no_autorepeat: disable key autorepeat
47 * 48 *
48 * This structure represents platform-specific data that use used by 49 * This structure represents platform-specific data that use used by
49 * matrix_keypad driver to perform proper initialization. 50 * matrix_keypad driver to perform proper initialization.
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data {
64 65
65 bool active_low; 66 bool active_low;
66 bool wakeup; 67 bool wakeup;
68 bool no_autorepeat;
67}; 69};
68 70
69/** 71/**
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7bdf6ffe2b49..0e269038bb38 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -21,6 +21,10 @@ struct in6_pktinfo {
21 int ipi6_ifindex; 21 int ipi6_ifindex;
22}; 22};
23 23
24struct ip6_mtuinfo {
25 struct sockaddr_in6 ip6m_addr;
26 __u32 ip6m_mtu;
27};
24 28
25struct in6_ifreq { 29struct in6_ifreq {
26 struct in6_addr ifr6_addr; 30 struct in6_addr ifr6_addr;
@@ -254,6 +258,7 @@ struct inet6_skb_parm {
254}; 258};
255 259
256#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 260#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
261#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
257 262
258static inline int inet6_iif(const struct sk_buff *skb) 263static inline int inet6_iif(const struct sk_buff *skb)
259{ 264{
@@ -335,21 +340,25 @@ struct ipv6_pinfo {
335 dstopts:1, 340 dstopts:1,
336 odstopts:1, 341 odstopts:1,
337 rxflow:1, 342 rxflow:1,
338 rxtclass:1; 343 rxtclass:1,
344 rxpmtu:1;
339 } bits; 345 } bits;
340 __u16 all; 346 __u16 all;
341 } rxopt; 347 } rxopt;
342 348
343 /* sockopt flags */ 349 /* sockopt flags */
344 __u8 recverr:1, 350 __u16 recverr:1,
345 sndflow:1, 351 sndflow:1,
346 pmtudisc:2, 352 pmtudisc:2,
347 ipv6only:1, 353 ipv6only:1,
348 srcprefs:3; /* 001: prefer temporary address 354 srcprefs:3, /* 001: prefer temporary address
349 * 010: prefer public address 355 * 010: prefer public address
350 * 100: prefer care-of address 356 * 100: prefer care-of address
351 */ 357 */
358 dontfrag:1;
359 __u8 min_hopcount;
352 __u8 tclass; 360 __u8 tclass;
361 __u8 padding;
353 362
354 __u32 dst_cookie; 363 __u32 dst_cookie;
355 364
@@ -359,6 +368,7 @@ struct ipv6_pinfo {
359 368
360 struct ipv6_txoptions *opt; 369 struct ipv6_txoptions *opt;
361 struct sk_buff *pktoptions; 370 struct sk_buff *pktoptions;
371 struct sk_buff *rxpmtu;
362 struct { 372 struct {
363 struct ipv6_txoptions *opt; 373 struct ipv6_txoptions *opt;
364 u8 hop_limit; 374 u8 hop_limit;
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
new file mode 100644
index 000000000000..da0341b8ca0a
--- /dev/null
+++ b/include/linux/ks8842.h
@@ -0,0 +1,34 @@
1/*
2 * ks8842.h KS8842 platform data struct definition
3 * Copyright (c) 2010 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LINUX_KS8842_H
20#define _LINUX_KS8842_H
21
22#include <linux/if_ether.h>
23
24/**
25 * struct ks8842_platform_data - Platform data of the KS8842 network driver
26 * @macaddr: The MAC address of the device, set to all 0:s to use the on in
27 * the chip.
28 *
29 */
30struct ks8842_platform_data {
31 u8 macaddr[ETH_ALEN];
32};
33
34#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 4157b5d42bd6..2b4deeeb8646 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -59,6 +59,7 @@ typedef enum {
59#include <linux/wait.h> 59#include <linux/wait.h>
60#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ 60#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
61#include <linux/kmemcheck.h> 61#include <linux/kmemcheck.h>
62#include <linux/rcupdate.h>
62 63
63struct poll_table_struct; 64struct poll_table_struct;
64struct pipe_inode_info; 65struct pipe_inode_info;
@@ -116,6 +117,12 @@ enum sock_shutdown_cmd {
116 SHUT_RDWR = 2, 117 SHUT_RDWR = 2,
117}; 118};
118 119
120struct socket_wq {
121 wait_queue_head_t wait;
122 struct fasync_struct *fasync_list;
123 struct rcu_head rcu;
124} ____cacheline_aligned_in_smp;
125
119/** 126/**
120 * struct socket - general BSD socket 127 * struct socket - general BSD socket
121 * @state: socket state (%SS_CONNECTED, etc) 128 * @state: socket state (%SS_CONNECTED, etc)
@@ -135,11 +142,8 @@ struct socket {
135 kmemcheck_bitfield_end(type); 142 kmemcheck_bitfield_end(type);
136 143
137 unsigned long flags; 144 unsigned long flags;
138 /* 145
139 * Please keep fasync_list & wait fields in the same cache line 146 struct socket_wq *wq;
140 */
141 struct fasync_struct *fasync_list;
142 wait_queue_head_t wait;
143 147
144 struct file *file; 148 struct file *file;
145 struct sock *sk; 149 struct sock *sk;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3c5ed5f5274e..69022d47d6f2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -218,16 +218,6 @@ struct neighbour;
218struct neigh_parms; 218struct neigh_parms;
219struct sk_buff; 219struct sk_buff;
220 220
221struct netif_rx_stats {
222 unsigned total;
223 unsigned dropped;
224 unsigned time_squeeze;
225 unsigned cpu_collision;
226 unsigned received_rps;
227};
228
229DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
230
231struct netdev_hw_addr { 221struct netdev_hw_addr {
232 struct list_head list; 222 struct list_head list;
233 unsigned char addr[MAX_ADDR_LEN]; 223 unsigned char addr[MAX_ADDR_LEN];
@@ -734,6 +724,7 @@ struct net_device_ops {
734 unsigned short vid); 724 unsigned short vid);
735#ifdef CONFIG_NET_POLL_CONTROLLER 725#ifdef CONFIG_NET_POLL_CONTROLLER
736 void (*ndo_poll_controller)(struct net_device *dev); 726 void (*ndo_poll_controller)(struct net_device *dev);
727 void (*ndo_netpoll_cleanup)(struct net_device *dev);
737#endif 728#endif
738 int (*ndo_set_vf_mac)(struct net_device *dev, 729 int (*ndo_set_vf_mac)(struct net_device *dev,
739 int queue, u8 *mac); 730 int queue, u8 *mac);
@@ -888,7 +879,7 @@ struct net_device {
888 unsigned char operstate; /* RFC2863 operstate */ 879 unsigned char operstate; /* RFC2863 operstate */
889 unsigned char link_mode; /* mapping policy to operstate */ 880 unsigned char link_mode; /* mapping policy to operstate */
890 881
891 unsigned mtu; /* interface MTU value */ 882 unsigned int mtu; /* interface MTU value */
892 unsigned short type; /* interface hardware type */ 883 unsigned short type; /* interface hardware type */
893 unsigned short hard_header_len; /* hardware hdr length */ 884 unsigned short hard_header_len; /* hardware hdr length */
894 885
@@ -1385,8 +1376,16 @@ static inline int unregister_gifconf(unsigned int family)
1385 */ 1376 */
1386struct softnet_data { 1377struct softnet_data {
1387 struct Qdisc *output_queue; 1378 struct Qdisc *output_queue;
1379 struct Qdisc **output_queue_tailp;
1388 struct list_head poll_list; 1380 struct list_head poll_list;
1389 struct sk_buff *completion_queue; 1381 struct sk_buff *completion_queue;
1382 struct sk_buff_head process_queue;
1383
1384 /* stats */
1385 unsigned int processed;
1386 unsigned int time_squeeze;
1387 unsigned int cpu_collision;
1388 unsigned int received_rps;
1390 1389
1391#ifdef CONFIG_RPS 1390#ifdef CONFIG_RPS
1392 struct softnet_data *rps_ipi_list; 1391 struct softnet_data *rps_ipi_list;
@@ -1397,14 +1396,16 @@ struct softnet_data {
1397 unsigned int cpu; 1396 unsigned int cpu;
1398 unsigned int input_queue_head; 1397 unsigned int input_queue_head;
1399#endif 1398#endif
1399 unsigned dropped;
1400 struct sk_buff_head input_pkt_queue; 1400 struct sk_buff_head input_pkt_queue;
1401 struct napi_struct backlog; 1401 struct napi_struct backlog;
1402}; 1402};
1403 1403
1404static inline void input_queue_head_incr(struct softnet_data *sd) 1404static inline void input_queue_head_add(struct softnet_data *sd,
1405 unsigned int len)
1405{ 1406{
1406#ifdef CONFIG_RPS 1407#ifdef CONFIG_RPS
1407 sd->input_queue_head++; 1408 sd->input_queue_head += len;
1408#endif 1409#endif
1409} 1410}
1410 1411
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a765ea898549..e9e231215865 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -14,6 +14,7 @@
14 14
15struct netpoll { 15struct netpoll {
16 struct net_device *dev; 16 struct net_device *dev;
17 struct net_device *real_dev;
17 char dev_name[IFNAMSIZ]; 18 char dev_name[IFNAMSIZ];
18 const char *name; 19 const char *name;
19 void (*rx_hook)(struct netpoll *, int, char *, int); 20 void (*rx_hook)(struct netpoll *, int, char *, int);
@@ -36,8 +37,11 @@ struct netpoll_info {
36 struct sk_buff_head txq; 37 struct sk_buff_head txq;
37 38
38 struct delayed_work tx_work; 39 struct delayed_work tx_work;
40
41 struct netpoll *netpoll;
39}; 42};
40 43
44void netpoll_poll_dev(struct net_device *dev);
41void netpoll_poll(struct netpoll *np); 45void netpoll_poll(struct netpoll *np);
42void netpoll_send_udp(struct netpoll *np, const char *msg, int len); 46void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
43void netpoll_print_options(struct netpoll *np); 47void netpoll_print_options(struct netpoll *np);
@@ -47,22 +51,23 @@ int netpoll_trap(void);
47void netpoll_set_trap(int trap); 51void netpoll_set_trap(int trap);
48void netpoll_cleanup(struct netpoll *np); 52void netpoll_cleanup(struct netpoll *np);
49int __netpoll_rx(struct sk_buff *skb); 53int __netpoll_rx(struct sk_buff *skb);
54void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
50 55
51 56
52#ifdef CONFIG_NETPOLL 57#ifdef CONFIG_NETPOLL
53static inline int netpoll_rx(struct sk_buff *skb) 58static inline bool netpoll_rx(struct sk_buff *skb)
54{ 59{
55 struct netpoll_info *npinfo = skb->dev->npinfo; 60 struct netpoll_info *npinfo = skb->dev->npinfo;
56 unsigned long flags; 61 unsigned long flags;
57 int ret = 0; 62 bool ret = false;
58 63
59 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 64 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
60 return 0; 65 return false;
61 66
62 spin_lock_irqsave(&npinfo->rx_lock, flags); 67 spin_lock_irqsave(&npinfo->rx_lock, flags);
63 /* check rx_flags again with the lock held */ 68 /* check rx_flags again with the lock held */
64 if (npinfo->rx_flags && __netpoll_rx(skb)) 69 if (npinfo->rx_flags && __netpoll_rx(skb))
65 ret = 1; 70 ret = true;
66 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 71 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
67 72
68 return ret; 73 return ret;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 717a5e54eb1d..e82957acea56 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -176,6 +176,7 @@ struct nfs_server {
176#define NFS_CAP_ATIME (1U << 11) 176#define NFS_CAP_ATIME (1U << 11)
177#define NFS_CAP_CTIME (1U << 12) 177#define NFS_CAP_CTIME (1U << 12)
178#define NFS_CAP_MTIME (1U << 13) 178#define NFS_CAP_MTIME (1U << 13)
179#define NFS_CAP_POSIX_LOCK (1U << 14)
179 180
180 181
181/* maximum number of slots to use */ 182/* maximum number of slots to use */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2ea3edeee7aa..f8750f9a65b8 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -709,6 +709,9 @@ enum nl80211_commands {
709 * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE, 709 * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE,
710 * NL80211_CMD_DISASSOCIATE. 710 * NL80211_CMD_DISASSOCIATE.
711 * 711 *
712 * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations
713 * connected to this BSS.
714 *
712 * @NL80211_ATTR_MAX: highest attribute number currently defined 715 * @NL80211_ATTR_MAX: highest attribute number currently defined
713 * @__NL80211_ATTR_AFTER_LAST: internal use 716 * @__NL80211_ATTR_AFTER_LAST: internal use
714 */ 717 */
@@ -864,6 +867,8 @@ enum nl80211_attrs {
864 867
865 NL80211_ATTR_LOCAL_STATE_CHANGE, 868 NL80211_ATTR_LOCAL_STATE_CHANGE,
866 869
870 NL80211_ATTR_AP_ISOLATE,
871
867 /* add attributes here, update the policy in nl80211.c */ 872 /* add attributes here, update the policy in nl80211.c */
868 873
869 __NL80211_ATTR_AFTER_LAST, 874 __NL80211_ATTR_AFTER_LAST,
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 9c5d3fad01f3..7c3609622334 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -206,6 +206,7 @@ static inline int notifier_to_errno(int ret)
206#define NETDEV_POST_TYPE_CHANGE 0x000F 206#define NETDEV_POST_TYPE_CHANGE 0x000F
207#define NETDEV_POST_INIT 0x0010 207#define NETDEV_POST_INIT 0x0010
208#define NETDEV_UNREGISTER_BATCH 0x0011 208#define NETDEV_UNREGISTER_BATCH 0x0011
209#define NETDEV_BONDING_DESLAVE 0x0012
209 210
210#define SYS_DOWN 0x0001 /* Notify of system down */ 211#define SYS_DOWN 0x0001 /* Notify of system down */
211#define SYS_RESTART SYS_DOWN 212#define SYS_RESTART SYS_DOWN
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c8f302991b66..c4c3d68be19a 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -442,7 +442,10 @@
442#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */ 442#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */
443#define PCI_EXP_LNKSTA 18 /* Link Status */ 443#define PCI_EXP_LNKSTA 18 /* Link Status */
444#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ 444#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
445#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
446#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
445#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */ 447#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */
448#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
446#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ 449#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
447#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ 450#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
448#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ 451#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 004908b104d5..4ec3b38ce9c5 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -429,6 +429,23 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
429 pos = rcu_dereference_raw(pos->next)) 429 pos = rcu_dereference_raw(pos->next))
430 430
431/** 431/**
432 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
433 * @tpos: the type * to use as a loop cursor.
434 * @pos: the &struct hlist_node to use as a loop cursor.
435 * @head: the head for your list.
436 * @member: the name of the hlist_node within the struct.
437 *
438 * This list-traversal primitive may safely run concurrently with
439 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
440 * as long as the traversal is guarded by rcu_read_lock().
441 */
442#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
443 for (pos = rcu_dereference_bh((head)->first); \
444 pos && ({ prefetch(pos->next); 1; }) && \
445 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
446 pos = rcu_dereference_bh(pos->next))
447
448/**
432 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point 449 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
433 * @tpos: the type * to use as a loop cursor. 450 * @tpos: the type * to use as a loop cursor.
434 * @pos: the &struct hlist_node to use as a loop cursor. 451 * @pos: the &struct hlist_node to use as a loop cursor.
@@ -440,6 +457,18 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
440 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 457 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
441 pos = rcu_dereference(pos->next)) 458 pos = rcu_dereference(pos->next))
442 459
460/**
461 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
462 * @tpos: the type * to use as a loop cursor.
463 * @pos: the &struct hlist_node to use as a loop cursor.
464 * @member: the name of the hlist_node within the struct.
465 */
466#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
467 for (pos = rcu_dereference_bh((pos)->next); \
468 pos && ({ prefetch(pos->next); 1; }) && \
469 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
470 pos = rcu_dereference_bh(pos->next))
471
443 472
444#endif /* __KERNEL__ */ 473#endif /* __KERNEL__ */
445#endif 474#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 872a98e13d6a..07db2feb8572 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map;
101# define rcu_read_release_sched() \ 101# define rcu_read_release_sched() \
102 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) 102 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
103 103
104static inline int debug_lockdep_rcu_enabled(void) 104extern int debug_lockdep_rcu_enabled(void);
105{
106 return likely(rcu_scheduler_active && debug_locks);
107}
108 105
109/** 106/**
110 * rcu_read_lock_held - might we be in RCU read-side critical section? 107 * rcu_read_lock_held - might we be in RCU read-side critical section?
@@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void)
195 192
196/** 193/**
197 * rcu_dereference_check - rcu_dereference with debug checking 194 * rcu_dereference_check - rcu_dereference with debug checking
195 * @p: The pointer to read, prior to dereferencing
196 * @c: The conditions under which the dereference will take place
197 *
198 * Do an rcu_dereference(), but check that the conditions under which the
199 * dereference will take place are correct. Typically the conditions indicate
200 * the various locking conditions that should be held at that point. The check
201 * should return true if the conditions are satisfied.
202 *
203 * For example:
204 *
205 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
206 * lockdep_is_held(&foo->lock));
198 * 207 *
199 * Do an rcu_dereference(), but check that the context is correct. 208 * could be used to indicate to lockdep that foo->bar may only be dereferenced
200 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to 209 * if either the RCU read lock is held, or that the lock required to replace
201 * ensure that the rcu_dereference_check() executes within an RCU 210 * the bar struct at foo->bar is held.
202 * read-side critical section. It is also possible to check for 211 *
203 * locks being held, for example, by using lockdep_is_held(). 212 * Note that the list of conditions may also include indications of when a lock
213 * need not be held, for example during initialisation or destruction of the
214 * target struct:
215 *
216 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
217 * lockdep_is_held(&foo->lock) ||
218 * atomic_read(&foo->usage) == 0);
204 */ 219 */
205#define rcu_dereference_check(p, c) \ 220#define rcu_dereference_check(p, c) \
206 ({ \ 221 ({ \
@@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void)
209 rcu_dereference_raw(p); \ 224 rcu_dereference_raw(p); \
210 }) 225 })
211 226
227/**
228 * rcu_dereference_protected - fetch RCU pointer when updates prevented
229 *
230 * Return the value of the specified RCU-protected pointer, but omit
231 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
232 * is useful in cases where update-side locks prevent the value of the
233 * pointer from changing. Please note that this primitive does -not-
234 * prevent the compiler from repeating this reference or combining it
235 * with other references, so it should not be used without protection
236 * of appropriate locks.
237 */
238#define rcu_dereference_protected(p, c) \
239 ({ \
240 if (debug_lockdep_rcu_enabled() && !(c)) \
241 lockdep_rcu_dereference(__FILE__, __LINE__); \
242 (p); \
243 })
244
212#else /* #ifdef CONFIG_PROVE_RCU */ 245#else /* #ifdef CONFIG_PROVE_RCU */
213 246
214#define rcu_dereference_check(p, c) rcu_dereference_raw(p) 247#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
248#define rcu_dereference_protected(p, c) (p)
215 249
216#endif /* #else #ifdef CONFIG_PROVE_RCU */ 250#endif /* #else #ifdef CONFIG_PROVE_RCU */
217 251
218/** 252/**
253 * rcu_access_pointer - fetch RCU pointer with no dereferencing
254 *
255 * Return the value of the specified RCU-protected pointer, but omit the
256 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
257 * when the value of this pointer is accessed, but the pointer is not
258 * dereferenced, for example, when testing an RCU-protected pointer against
259 * NULL. This may also be used in cases where update-side locks prevent
260 * the value of the pointer from changing, but rcu_dereference_protected()
261 * is a lighter-weight primitive for this use case.
262 */
263#define rcu_access_pointer(p) ACCESS_ONCE(p)
264
265/**
219 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 266 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
220 * 267 *
221 * When synchronize_rcu() is invoked on one CPU while other CPUs 268 * When synchronize_rcu() is invoked on one CPU while other CPUs
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d1c7c90e9cd4..5a42c36cb6aa 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -7,6 +7,12 @@
7#include <linux/if_addr.h> 7#include <linux/if_addr.h>
8#include <linux/neighbour.h> 8#include <linux/neighbour.h>
9 9
10/* rtnetlink families. Values up to 127 are reserved for real address
11 * families, values above 128 may be used arbitrarily.
12 */
13#define RTNL_FAMILY_IPMR 128
14#define RTNL_FAMILY_MAX 128
15
10/**** 16/****
11 * Routing/neighbour discovery messages. 17 * Routing/neighbour discovery messages.
12 ****/ 18 ****/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 38501d20650c..c9525bce80f6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -187,7 +187,6 @@ union skb_shared_tx {
187 * the end of the header data, ie. at skb->end. 187 * the end of the header data, ie. at skb->end.
188 */ 188 */
189struct skb_shared_info { 189struct skb_shared_info {
190 atomic_t dataref;
191 unsigned short nr_frags; 190 unsigned short nr_frags;
192 unsigned short gso_size; 191 unsigned short gso_size;
193 /* Warning: this field is not always filled in (UFO)! */ 192 /* Warning: this field is not always filled in (UFO)! */
@@ -197,6 +196,12 @@ struct skb_shared_info {
197 union skb_shared_tx tx_flags; 196 union skb_shared_tx tx_flags;
198 struct sk_buff *frag_list; 197 struct sk_buff *frag_list;
199 struct skb_shared_hwtstamps hwtstamps; 198 struct skb_shared_hwtstamps hwtstamps;
199
200 /*
201 * Warning : all fields before dataref are cleared in __alloc_skb()
202 */
203 atomic_t dataref;
204
200 skb_frag_t frags[MAX_SKB_FRAGS]; 205 skb_frag_t frags[MAX_SKB_FRAGS];
201 /* Intermediate layers must ensure that destructor_arg 206 /* Intermediate layers must ensure that destructor_arg
202 * remains valid until skb destructor */ 207 * remains valid until skb destructor */
@@ -470,10 +475,6 @@ extern int skb_cow_data(struct sk_buff *skb, int tailbits,
470 struct sk_buff **trailer); 475 struct sk_buff **trailer);
471extern int skb_pad(struct sk_buff *skb, int pad); 476extern int skb_pad(struct sk_buff *skb, int pad);
472#define dev_kfree_skb(a) consume_skb(a) 477#define dev_kfree_skb(a) consume_skb(a)
473extern void skb_over_panic(struct sk_buff *skb, int len,
474 void *here);
475extern void skb_under_panic(struct sk_buff *skb, int len,
476 void *here);
477 478
478extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 479extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
479 int getfrag(void *from, char *to, int offset, 480 int getfrag(void *from, char *to, int offset,
@@ -1132,6 +1133,11 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1132 return skb->data += len; 1133 return skb->data += len;
1133} 1134}
1134 1135
1136static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1137{
1138 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1139}
1140
1135extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1141extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1136 1142
1137static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1143static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
@@ -1355,9 +1361,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
1355 * 1361 *
1356 * Various parts of the networking layer expect at least 32 bytes of 1362 * Various parts of the networking layer expect at least 32 bytes of
1357 * headroom, you should not reduce this. 1363 * headroom, you should not reduce this.
1364 * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
1365 * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
1366 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1358 */ 1367 */
1359#ifndef NET_SKB_PAD 1368#ifndef NET_SKB_PAD
1360#define NET_SKB_PAD 32 1369#define NET_SKB_PAD 64
1361#endif 1370#endif
1362 1371
1363extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1372extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h
index aed64ed3dc8a..a223ecbc71ef 100644
--- a/include/linux/spi/wl12xx.h
+++ b/include/linux/spi/wl12xx.h
@@ -26,6 +26,8 @@
26 26
27struct wl12xx_platform_data { 27struct wl12xx_platform_data {
28 void (*set_power)(bool enable); 28 void (*set_power)(bool enable);
29 /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
30 int irq;
29 bool use_eeprom; 31 bool use_eeprom;
30}; 32};
31 33
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 24f988547361..a2608bff9c78 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -305,6 +305,7 @@ struct ssb_bus {
305 /* ID information about the Chip. */ 305 /* ID information about the Chip. */
306 u16 chip_id; 306 u16 chip_id;
307 u16 chip_rev; 307 u16 chip_rev;
308 u16 sprom_offset;
308 u16 sprom_size; /* number of words in sprom */ 309 u16 sprom_size; /* number of words in sprom */
309 u8 chip_package; 310 u8 chip_package;
310 311
@@ -394,6 +395,9 @@ extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
394 395
395extern void ssb_bus_unregister(struct ssb_bus *bus); 396extern void ssb_bus_unregister(struct ssb_bus *bus);
396 397
398/* Does the device have an SPROM? */
399extern bool ssb_is_sprom_available(struct ssb_bus *bus);
400
397/* Set a fallback SPROM. 401/* Set a fallback SPROM.
398 * See kdoc at the function definition for complete documentation. */ 402 * See kdoc at the function definition for complete documentation. */
399extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); 403extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 4e27acf0a92f..2cdf249b4e5f 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -53,6 +53,7 @@
53#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ 53#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */
54#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ 54#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
55#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ 55#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
56#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
56#define SSB_CHIPCO_CORECTL 0x0008 57#define SSB_CHIPCO_CORECTL 0x0008
57#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ 58#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
58#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ 59#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
@@ -385,6 +386,7 @@
385 386
386 387
387/** Chip specific Chip-Status register contents. */ 388/** Chip specific Chip-Status register contents. */
389#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */
388#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 390#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
389#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ 391#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
390#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ 392#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
@@ -398,6 +400,18 @@
398#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 400#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
399#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ 401#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */
400 402
403/** Macros to determine SPROM presence based on Chip-Status register. */
404#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
405 ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
406 SSB_CHIPCO_CHST_4325_OTP_SEL)
407#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
408 (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
409#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
410 (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
411 SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
412 ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
413 SSB_CHIPCO_CHST_4325_OTP_SEL))
414
401 415
402 416
403/** Clockcontrol masks and values **/ 417/** Clockcontrol masks and values **/
@@ -564,6 +578,7 @@ struct ssb_chipcommon_pmu {
564struct ssb_chipcommon { 578struct ssb_chipcommon {
565 struct ssb_device *dev; 579 struct ssb_device *dev;
566 u32 capabilities; 580 u32 capabilities;
581 u32 status;
567 /* Fast Powerup Delay constant */ 582 /* Fast Powerup Delay constant */
568 u16 fast_pwrup_delay; 583 u16 fast_pwrup_delay;
569 struct ssb_chipcommon_pmu pmu; 584 struct ssb_chipcommon_pmu pmu;
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 9ae9082eaeb4..a6d5225b9275 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -170,26 +170,27 @@
170#define SSB_SPROMSIZE_WORDS_R4 220 170#define SSB_SPROMSIZE_WORDS_R4 220
171#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) 171#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
172#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) 172#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
173#define SSB_SPROM_BASE 0x1000 173#define SSB_SPROM_BASE1 0x1000
174#define SSB_SPROM_REVISION 0x107E 174#define SSB_SPROM_BASE31 0x0800
175#define SSB_SPROM_REVISION 0x007E
175#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ 176#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */
176#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ 177#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */
177#define SSB_SPROM_REVISION_CRC_SHIFT 8 178#define SSB_SPROM_REVISION_CRC_SHIFT 8
178 179
179/* SPROM Revision 1 */ 180/* SPROM Revision 1 */
180#define SSB_SPROM1_SPID 0x1004 /* Subsystem Product ID for PCI */ 181#define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */
181#define SSB_SPROM1_SVID 0x1006 /* Subsystem Vendor ID for PCI */ 182#define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */
182#define SSB_SPROM1_PID 0x1008 /* Product ID for PCI */ 183#define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */
183#define SSB_SPROM1_IL0MAC 0x1048 /* 6 bytes MAC address for 802.11b/g */ 184#define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */
184#define SSB_SPROM1_ET0MAC 0x104E /* 6 bytes MAC address for Ethernet */ 185#define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */
185#define SSB_SPROM1_ET1MAC 0x1054 /* 6 bytes MAC address for 802.11a */ 186#define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */
186#define SSB_SPROM1_ETHPHY 0x105A /* Ethernet PHY settings */ 187#define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */
187#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ 188#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
188#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ 189#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
189#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 190#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5
190#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ 191#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
191#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ 192#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
192#define SSB_SPROM1_BINF 0x105C /* Board info */ 193#define SSB_SPROM1_BINF 0x005C /* Board info */
193#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ 194#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */
194#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ 195#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */
195#define SSB_SPROM1_BINF_CCODE_SHIFT 8 196#define SSB_SPROM1_BINF_CCODE_SHIFT 8
@@ -197,63 +198,63 @@
197#define SSB_SPROM1_BINF_ANTBG_SHIFT 12 198#define SSB_SPROM1_BINF_ANTBG_SHIFT 12
198#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ 199#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */
199#define SSB_SPROM1_BINF_ANTA_SHIFT 14 200#define SSB_SPROM1_BINF_ANTA_SHIFT 14
200#define SSB_SPROM1_PA0B0 0x105E 201#define SSB_SPROM1_PA0B0 0x005E
201#define SSB_SPROM1_PA0B1 0x1060 202#define SSB_SPROM1_PA0B1 0x0060
202#define SSB_SPROM1_PA0B2 0x1062 203#define SSB_SPROM1_PA0B2 0x0062
203#define SSB_SPROM1_GPIOA 0x1064 /* General Purpose IO pins 0 and 1 */ 204#define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */
204#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ 205#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */
205#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ 206#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */
206#define SSB_SPROM1_GPIOA_P1_SHIFT 8 207#define SSB_SPROM1_GPIOA_P1_SHIFT 8
207#define SSB_SPROM1_GPIOB 0x1066 /* General Purpuse IO pins 2 and 3 */ 208#define SSB_SPROM1_GPIOB 0x0066 /* General Purpuse IO pins 2 and 3 */
208#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ 209#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */
209#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ 210#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */
210#define SSB_SPROM1_GPIOB_P3_SHIFT 8 211#define SSB_SPROM1_GPIOB_P3_SHIFT 8
211#define SSB_SPROM1_MAXPWR 0x1068 /* Power Amplifier Max Power */ 212#define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */
212#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ 213#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */
213#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ 214#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */
214#define SSB_SPROM1_MAXPWR_A_SHIFT 8 215#define SSB_SPROM1_MAXPWR_A_SHIFT 8
215#define SSB_SPROM1_PA1B0 0x106A 216#define SSB_SPROM1_PA1B0 0x006A
216#define SSB_SPROM1_PA1B1 0x106C 217#define SSB_SPROM1_PA1B1 0x006C
217#define SSB_SPROM1_PA1B2 0x106E 218#define SSB_SPROM1_PA1B2 0x006E
218#define SSB_SPROM1_ITSSI 0x1070 /* Idle TSSI Target */ 219#define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */
219#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ 220#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/
220#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ 221#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */
221#define SSB_SPROM1_ITSSI_A_SHIFT 8 222#define SSB_SPROM1_ITSSI_A_SHIFT 8
222#define SSB_SPROM1_BFLLO 0x1072 /* Boardflags (low 16 bits) */ 223#define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */
223#define SSB_SPROM1_AGAIN 0x1074 /* Antenna Gain (in dBm Q5.2) */ 224#define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */
224#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ 225#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */
225#define SSB_SPROM1_AGAIN_BG_SHIFT 0 226#define SSB_SPROM1_AGAIN_BG_SHIFT 0
226#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ 227#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */
227#define SSB_SPROM1_AGAIN_A_SHIFT 8 228#define SSB_SPROM1_AGAIN_A_SHIFT 8
228 229
229/* SPROM Revision 2 (inherits from rev 1) */ 230/* SPROM Revision 2 (inherits from rev 1) */
230#define SSB_SPROM2_BFLHI 0x1038 /* Boardflags (high 16 bits) */ 231#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */
231#define SSB_SPROM2_MAXP_A 0x103A /* A-PHY Max Power */ 232#define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */
232#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ 233#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */
233#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ 234#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */
234#define SSB_SPROM2_MAXP_A_LO_SHIFT 8 235#define SSB_SPROM2_MAXP_A_LO_SHIFT 8
235#define SSB_SPROM2_PA1LOB0 0x103C /* A-PHY PowerAmplifier Low Settings */ 236#define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY PowerAmplifier Low Settings */
236#define SSB_SPROM2_PA1LOB1 0x103E /* A-PHY PowerAmplifier Low Settings */ 237#define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */
237#define SSB_SPROM2_PA1LOB2 0x1040 /* A-PHY PowerAmplifier Low Settings */ 238#define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */
238#define SSB_SPROM2_PA1HIB0 0x1042 /* A-PHY PowerAmplifier High Settings */ 239#define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */
239#define SSB_SPROM2_PA1HIB1 0x1044 /* A-PHY PowerAmplifier High Settings */ 240#define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */
240#define SSB_SPROM2_PA1HIB2 0x1046 /* A-PHY PowerAmplifier High Settings */ 241#define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */
241#define SSB_SPROM2_OPO 0x1078 /* OFDM Power Offset from CCK Level */ 242#define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */
242#define SSB_SPROM2_OPO_VALUE 0x00FF 243#define SSB_SPROM2_OPO_VALUE 0x00FF
243#define SSB_SPROM2_OPO_UNUSED 0xFF00 244#define SSB_SPROM2_OPO_UNUSED 0xFF00
244#define SSB_SPROM2_CCODE 0x107C /* Two char Country Code */ 245#define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */
245 246
246/* SPROM Revision 3 (inherits most data from rev 2) */ 247/* SPROM Revision 3 (inherits most data from rev 2) */
247#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ 248#define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
248#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ 249#define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
249#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ 250#define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
250#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ 251#define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */
251#define SSB_SPROM3_GPIOLDC 0x1042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */
252#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ 252#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */
253#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 253#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8
254#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ 254#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */
255#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 255#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16
256#define SSB_SPROM3_CCKPO 0x1078 /* CCK Power Offset */ 256#define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */
257#define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */
257#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ 258#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */
258#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ 259#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */
259#define SSB_SPROM3_CCKPO_2M_SHIFT 4 260#define SSB_SPROM3_CCKPO_2M_SHIFT 4
@@ -264,100 +265,100 @@
264#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ 265#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */
265 266
266/* SPROM Revision 4 */ 267/* SPROM Revision 4 */
267#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ 268#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
268#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */ 269#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
270#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */
271#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */
272#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */
273#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */
274#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */
275#define SSB_SPROM4_GPIOA_P1_SHIFT 8
276#define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */
277#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */
278#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */
279#define SSB_SPROM4_GPIOB_P3_SHIFT 8
280#define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? */
269#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ 281#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
270#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ 282#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
271#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 283#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5
272#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ 284#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
273#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ 285#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
274#define SSB_SPROM4_CCODE 0x1052 /* Country Code (2 bytes) */ 286#define SSB_SPROM4_ANTAVAIL 0x005D /* Antenna available bitfields */
275#define SSB_SPROM4_ANTAVAIL 0x105D /* Antenna available bitfields */ 287#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */
276#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */ 288#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0
277#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0 289#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */
278#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */ 290#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8
279#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8 291#define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */
280#define SSB_SPROM4_BFLLO 0x1044 /* Boardflags (low 16 bits) */
281#define SSB_SPROM4_AGAIN01 0x105E /* Antenna Gain (in dBm Q5.2) */
282#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ 292#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */
283#define SSB_SPROM4_AGAIN0_SHIFT 0 293#define SSB_SPROM4_AGAIN0_SHIFT 0
284#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ 294#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */
285#define SSB_SPROM4_AGAIN1_SHIFT 8 295#define SSB_SPROM4_AGAIN1_SHIFT 8
286#define SSB_SPROM4_AGAIN23 0x1060 296#define SSB_SPROM4_AGAIN23 0x0060
287#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ 297#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */
288#define SSB_SPROM4_AGAIN2_SHIFT 0 298#define SSB_SPROM4_AGAIN2_SHIFT 0
289#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ 299#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */
290#define SSB_SPROM4_AGAIN3_SHIFT 8 300#define SSB_SPROM4_AGAIN3_SHIFT 8
291#define SSB_SPROM4_BFLHI 0x1046 /* Board Flags Hi */ 301#define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */
292#define SSB_SPROM4_MAXP_BG 0x1080 /* Max Power BG in path 1 */
293#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ 302#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */
294#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ 303#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
295#define SSB_SPROM4_ITSSI_BG_SHIFT 8 304#define SSB_SPROM4_ITSSI_BG_SHIFT 8
296#define SSB_SPROM4_MAXP_A 0x108A /* Max Power A in path 1 */ 305#define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */
297#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ 306#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */
298#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ 307#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
299#define SSB_SPROM4_ITSSI_A_SHIFT 8 308#define SSB_SPROM4_ITSSI_A_SHIFT 8
300#define SSB_SPROM4_GPIOA 0x1056 /* Gen. Purpose IO # 0 and 1 */ 309#define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */
301#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ 310#define SSB_SPROM4_PA0B1 0x0084 /* only guesses */
302#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ 311#define SSB_SPROM4_PA0B2 0x0086
303#define SSB_SPROM4_GPIOA_P1_SHIFT 8 312#define SSB_SPROM4_PA1B0 0x008E
304#define SSB_SPROM4_GPIOB 0x1058 /* Gen. Purpose IO # 2 and 3 */ 313#define SSB_SPROM4_PA1B1 0x0090
305#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ 314#define SSB_SPROM4_PA1B2 0x0092
306#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */
307#define SSB_SPROM4_GPIOB_P3_SHIFT 8
308#define SSB_SPROM4_PA0B0 0x1082 /* The paXbY locations are */
309#define SSB_SPROM4_PA0B1 0x1084 /* only guesses */
310#define SSB_SPROM4_PA0B2 0x1086
311#define SSB_SPROM4_PA1B0 0x108E
312#define SSB_SPROM4_PA1B1 0x1090
313#define SSB_SPROM4_PA1B2 0x1092
314 315
315/* SPROM Revision 5 (inherits most data from rev 4) */ 316/* SPROM Revision 5 (inherits most data from rev 4) */
316#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */ 317#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */
317#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */ 318#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */
318#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */ 319#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */
319#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */ 320#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */
320#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */ 321#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */
321#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ 322#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
322#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ 323#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */
323#define SSB_SPROM5_GPIOA_P1_SHIFT 8 324#define SSB_SPROM5_GPIOA_P1_SHIFT 8
324#define SSB_SPROM5_GPIOB 0x1078 /* Gen. Purpose IO # 2 and 3 */ 325#define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */
325#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ 326#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */
326#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ 327#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
327#define SSB_SPROM5_GPIOB_P3_SHIFT 8 328#define SSB_SPROM5_GPIOB_P3_SHIFT 8
328 329
329/* SPROM Revision 8 */ 330/* SPROM Revision 8 */
330#define SSB_SPROM8_BOARDREV 0x1082 /* Board revision */ 331#define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */
331#define SSB_SPROM8_BFLLO 0x1084 /* Board flags (bits 0-15) */ 332#define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */
332#define SSB_SPROM8_BFLHI 0x1086 /* Board flags (bits 16-31) */ 333#define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */
333#define SSB_SPROM8_BFL2LO 0x1088 /* Board flags (bits 32-47) */ 334#define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */
334#define SSB_SPROM8_BFL2HI 0x108A /* Board flags (bits 48-63) */ 335#define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */
335#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */ 336#define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */
336#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */ 337#define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */
337#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields*/ 338#define SSB_SPROM8_GPIOA 0x0096 /*Gen. Purpose IO # 0 and 1 */
338#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ 339#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
339#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 340#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
340#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ 341#define SSB_SPROM8_GPIOA_P1_SHIFT 8
341#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 342#define SSB_SPROM8_GPIOB 0x0098 /* Gen. Purpose IO # 2 and 3 */
342#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */ 343#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
344#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
345#define SSB_SPROM8_GPIOB_P3_SHIFT 8
346#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/
347#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
348#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
349#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
350#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
351#define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */
343#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ 352#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */
344#define SSB_SPROM8_AGAIN0_SHIFT 0 353#define SSB_SPROM8_AGAIN0_SHIFT 0
345#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ 354#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */
346#define SSB_SPROM8_AGAIN1_SHIFT 8 355#define SSB_SPROM8_AGAIN1_SHIFT 8
347#define SSB_SPROM8_AGAIN23 0x10A0 356#define SSB_SPROM8_AGAIN23 0x00A0
348#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ 357#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */
349#define SSB_SPROM8_AGAIN2_SHIFT 0 358#define SSB_SPROM8_AGAIN2_SHIFT 0
350#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ 359#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
351#define SSB_SPROM8_AGAIN3_SHIFT 8 360#define SSB_SPROM8_AGAIN3_SHIFT 8
352#define SSB_SPROM8_GPIOA 0x1096 /*Gen. Purpose IO # 0 and 1 */ 361#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */
353#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
354#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
355#define SSB_SPROM8_GPIOA_P1_SHIFT 8
356#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */
357#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
358#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
359#define SSB_SPROM8_GPIOB_P3_SHIFT 8
360#define SSB_SPROM8_RSSIPARM2G 0x10A4 /* RSSI params for 2GHz */
361#define SSB_SPROM8_RSSISMF2G 0x000F 362#define SSB_SPROM8_RSSISMF2G 0x000F
362#define SSB_SPROM8_RSSISMC2G 0x00F0 363#define SSB_SPROM8_RSSISMC2G 0x00F0
363#define SSB_SPROM8_RSSISMC2G_SHIFT 4 364#define SSB_SPROM8_RSSISMC2G_SHIFT 4
@@ -365,7 +366,7 @@
365#define SSB_SPROM8_RSSISAV2G_SHIFT 8 366#define SSB_SPROM8_RSSISAV2G_SHIFT 8
366#define SSB_SPROM8_BXA2G 0x1800 367#define SSB_SPROM8_BXA2G 0x1800
367#define SSB_SPROM8_BXA2G_SHIFT 11 368#define SSB_SPROM8_BXA2G_SHIFT 11
368#define SSB_SPROM8_RSSIPARM5G 0x10A6 /* RSSI params for 5GHz */ 369#define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */
369#define SSB_SPROM8_RSSISMF5G 0x000F 370#define SSB_SPROM8_RSSISMF5G 0x000F
370#define SSB_SPROM8_RSSISMC5G 0x00F0 371#define SSB_SPROM8_RSSISMC5G 0x00F0
371#define SSB_SPROM8_RSSISMC5G_SHIFT 4 372#define SSB_SPROM8_RSSISMC5G_SHIFT 4
@@ -373,47 +374,47 @@
373#define SSB_SPROM8_RSSISAV5G_SHIFT 8 374#define SSB_SPROM8_RSSISAV5G_SHIFT 8
374#define SSB_SPROM8_BXA5G 0x1800 375#define SSB_SPROM8_BXA5G 0x1800
375#define SSB_SPROM8_BXA5G_SHIFT 11 376#define SSB_SPROM8_BXA5G_SHIFT 11
376#define SSB_SPROM8_TRI25G 0x10A8 /* TX isolation 2.4&5.3GHz */ 377#define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */
377#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ 378#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */
378#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ 379#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */
379#define SSB_SPROM8_TRI5G_SHIFT 8 380#define SSB_SPROM8_TRI5G_SHIFT 8
380#define SSB_SPROM8_TRI5GHL 0x10AA /* TX isolation 5.2/5.8GHz */ 381#define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */
381#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ 382#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */
382#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ 383#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */
383#define SSB_SPROM8_TRI5GH_SHIFT 8 384#define SSB_SPROM8_TRI5GH_SHIFT 8
384#define SSB_SPROM8_RXPO 0x10AC /* RX power offsets */ 385#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */
385#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ 386#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
386#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ 387#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
387#define SSB_SPROM8_RXPO5G_SHIFT 8 388#define SSB_SPROM8_RXPO5G_SHIFT 8
388#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power 2GHz in path 1 */ 389#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
389#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ 390#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
390#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ 391#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
391#define SSB_SPROM8_ITSSI_BG_SHIFT 8 392#define SSB_SPROM8_ITSSI_BG_SHIFT 8
392#define SSB_SPROM8_PA0B0 0x10C2 /* 2GHz power amp settings */ 393#define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */
393#define SSB_SPROM8_PA0B1 0x10C4 394#define SSB_SPROM8_PA0B1 0x00C4
394#define SSB_SPROM8_PA0B2 0x10C6 395#define SSB_SPROM8_PA0B2 0x00C6
395#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power 5.3GHz */ 396#define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */
396#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */ 397#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */
397#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ 398#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
398#define SSB_SPROM8_ITSSI_A_SHIFT 8 399#define SSB_SPROM8_ITSSI_A_SHIFT 8
399#define SSB_SPROM8_MAXP_AHL 0x10CA /* Max Power 5.2/5.8GHz */ 400#define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */
400#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ 401#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */
401#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ 402#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */
402#define SSB_SPROM8_MAXP_AL_SHIFT 8 403#define SSB_SPROM8_MAXP_AL_SHIFT 8
403#define SSB_SPROM8_PA1B0 0x10CC /* 5.3GHz power amp settings */ 404#define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */
404#define SSB_SPROM8_PA1B1 0x10CE 405#define SSB_SPROM8_PA1B1 0x00CE
405#define SSB_SPROM8_PA1B2 0x10D0 406#define SSB_SPROM8_PA1B2 0x00D0
406#define SSB_SPROM8_PA1LOB0 0x10D2 /* 5.2GHz power amp settings */ 407#define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */
407#define SSB_SPROM8_PA1LOB1 0x10D4 408#define SSB_SPROM8_PA1LOB1 0x00D4
408#define SSB_SPROM8_PA1LOB2 0x10D6 409#define SSB_SPROM8_PA1LOB2 0x00D6
409#define SSB_SPROM8_PA1HIB0 0x10D8 /* 5.8GHz power amp settings */ 410#define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */
410#define SSB_SPROM8_PA1HIB1 0x10DA 411#define SSB_SPROM8_PA1HIB1 0x00DA
411#define SSB_SPROM8_PA1HIB2 0x10DC 412#define SSB_SPROM8_PA1HIB2 0x00DC
412#define SSB_SPROM8_CCK2GPO 0x1140 /* CCK power offset */ 413#define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */
413#define SSB_SPROM8_OFDM2GPO 0x1142 /* 2.4GHz OFDM power offset */ 414#define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */
414#define SSB_SPROM8_OFDM5GPO 0x1146 /* 5.3GHz OFDM power offset */ 415#define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */
415#define SSB_SPROM8_OFDM5GLPO 0x114A /* 5.2GHz OFDM power offset */ 416#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
416#define SSB_SPROM8_OFDM5GHPO 0x114E /* 5.8GHz OFDM power offset */ 417#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
417 418
418/* Values for SSB_SPROM1_BINF_CCODE */ 419/* Values for SSB_SPROM1_BINF_CCODE */
419enum { 420enum {
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 1614d78c60ed..20725e213aee 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -30,7 +30,7 @@ struct unix_skb_parms {
30#endif 30#endif
31}; 31};
32 32
33#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 33#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
34#define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 34#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
35#define UNIXSID(skb) (&UNIXCB((skb)).secid) 35#define UNIXSID(skb) (&UNIXCB((skb)).secid)
36 36
@@ -45,21 +45,23 @@ struct unix_skb_parms {
45struct unix_sock { 45struct unix_sock {
46 /* WARNING: sk has to be the first member */ 46 /* WARNING: sk has to be the first member */
47 struct sock sk; 47 struct sock sk;
48 struct unix_address *addr; 48 struct unix_address *addr;
49 struct dentry *dentry; 49 struct dentry *dentry;
50 struct vfsmount *mnt; 50 struct vfsmount *mnt;
51 struct mutex readlock; 51 struct mutex readlock;
52 struct sock *peer; 52 struct sock *peer;
53 struct sock *other; 53 struct sock *other;
54 struct list_head link; 54 struct list_head link;
55 atomic_long_t inflight; 55 atomic_long_t inflight;
56 spinlock_t lock; 56 spinlock_t lock;
57 unsigned int gc_candidate : 1; 57 unsigned int gc_candidate : 1;
58 unsigned int gc_maybe_cycle : 1; 58 unsigned int gc_maybe_cycle : 1;
59 wait_queue_head_t peer_wait; 59 struct socket_wq peer_wq;
60}; 60};
61#define unix_sk(__sk) ((struct unix_sock *)__sk) 61#define unix_sk(__sk) ((struct unix_sock *)__sk)
62 62
63#define peer_wait peer_wq.wait
64
63#ifdef CONFIG_SYSCTL 65#ifdef CONFIG_SYSCTL
64extern int unix_sysctl_register(struct net *net); 66extern int unix_sysctl_register(struct net *net);
65extern void unix_sysctl_unregister(struct net *net); 67extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 42a7c7867849..318ab9478a44 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -23,17 +23,19 @@ struct caif_param {
23}; 23};
24 24
25/** 25/**
26 * caif_connect_request - Request data for CAIF channel setup. 26 * struct caif_connect_request - Request data for CAIF channel setup.
27 * @protocol: Type of CAIF protocol to use (at, datagram etc)
27 * @sockaddr: Socket address to connect. 28 * @sockaddr: Socket address to connect.
28 * @priority: Priority of the connection. 29 * @priority: Priority of the connection.
29 * @link_selector: Link selector (high bandwidth or low latency) 30 * @link_selector: Link selector (high bandwidth or low latency)
30 * @link_name: Name of the CAIF Link Layer to use. 31 * @link_name: Name of the CAIF Link Layer to use.
32 * @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
31 * 33 *
32 * This struct is used when connecting a CAIF channel. 34 * This struct is used when connecting a CAIF channel.
33 * It contains all CAIF channel configuration options. 35 * It contains all CAIF channel configuration options.
34 */ 36 */
35struct caif_connect_request { 37struct caif_connect_request {
36 int protocol; 38 enum caif_protocol_type protocol;
37 struct sockaddr_caif sockaddr; 39 struct sockaddr_caif sockaddr;
38 enum caif_channel_priority priority; 40 enum caif_channel_priority priority;
39 enum caif_link_selector link_selector; 41 enum caif_link_selector link_selector;
@@ -68,6 +70,17 @@ int caif_connect_client(struct caif_connect_request *config,
68int caif_disconnect_client(struct cflayer *client_layer); 70int caif_disconnect_client(struct cflayer *client_layer);
69 71
70/** 72/**
73 * caif_release_client - Release adaptation layer reference to client.
74 *
75 * @client_layer: Client layer.
76 *
77 * Releases a client/adaptation layer use of the caif stack.
78 * This function must be used after caif_disconnect_client to
79 * decrease the reference count of the service layer.
80 */
81void caif_release_client(struct cflayer *client_layer);
82
83/**
71 * connect_req_to_link_param - Translate configuration parameters 84 * connect_req_to_link_param - Translate configuration parameters
72 * from socket format to internal format. 85 * from socket format to internal format.
73 * @cnfg: Pointer to configuration handler 86 * @cnfg: Pointer to configuration handler
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 366082c5d435..9fc2fc20b884 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -87,13 +87,21 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
87int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer); 87int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer);
88 88
89/** 89/**
90 * cfcnfg_del_adapt_layer - Deletes an adaptation layer from the CAIF stack. 90 * cfcnfg_disconn_adapt_layer - Disconnects an adaptation layer.
91 * 91 *
92 * @cnfg: Pointer to a CAIF configuration object, created by 92 * @cnfg: Pointer to a CAIF configuration object, created by
93 * cfcnfg_create(). 93 * cfcnfg_create().
94 * @adap_layer: Adaptation layer to be removed. 94 * @adap_layer: Adaptation layer to be removed.
95 */ 95 */
96int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer); 96int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg,
97 struct cflayer *adap_layer);
98
99/**
100 * cfcnfg_release_adap_layer - Used by client to release the adaptation layer.
101 *
102 * @adap_layer: Adaptation layer.
103 */
104void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
97 105
98/** 106/**
99 * cfcnfg_add_adaptation_layer - Add an adaptation layer to the CAIF stack. 107 * cfcnfg_add_adaptation_layer - Add an adaptation layer to the CAIF stack.
@@ -102,14 +110,13 @@ int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer);
102 * driver functionality is implemented. 110 * driver functionality is implemented.
103 * 111 *
104 * @cnfg: Pointer to a CAIF configuration object, created by 112 * @cnfg: Pointer to a CAIF configuration object, created by
105 * cfcnfg_create(). 113 * cfcnfg_create().
106 * @param: Link setup parameters. 114 * @param: Link setup parameters.
107 * @adap_layer: Specify the adaptation layer; the receive and 115 * @adap_layer: Specify the adaptation layer; the receive and
108 * flow-control functions MUST be set in the structure. 116 * flow-control functions MUST be set in the structure.
109 * 117 *
110 */ 118 */
111int 119int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
112cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
113 struct cfctrl_link_param *param, 120 struct cfctrl_link_param *param,
114 struct cflayer *adap_layer); 121 struct cflayer *adap_layer);
115 122
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
index dee25b86caa0..997603f2bf4c 100644
--- a/include/net/caif/cfctrl.h
+++ b/include/net/caif/cfctrl.h
@@ -43,8 +43,7 @@ struct cfctrl_rsp {
43 void (*linksetup_rsp)(struct cflayer *layer, u8 linkid, 43 void (*linksetup_rsp)(struct cflayer *layer, u8 linkid,
44 enum cfctrl_srv serv, u8 phyid, 44 enum cfctrl_srv serv, u8 phyid,
45 struct cflayer *adapt_layer); 45 struct cflayer *adapt_layer);
46 void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid, 46 void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid);
47 struct cflayer *client_layer);
48 void (*linkerror_ind)(void); 47 void (*linkerror_ind)(void);
49 void (*enum_rsp)(void); 48 void (*enum_rsp)(void);
50 void (*sleep_rsp)(void); 49 void (*sleep_rsp)(void);
@@ -117,7 +116,7 @@ struct cfctrl {
117}; 116};
118 117
119void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid); 118void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid);
120void cfctrl_linkup_request(struct cflayer *cfctrl, 119int cfctrl_linkup_request(struct cflayer *cfctrl,
121 struct cfctrl_link_param *param, 120 struct cfctrl_link_param *param,
122 struct cflayer *user_layer); 121 struct cflayer *user_layer);
123int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid, 122int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
@@ -135,4 +134,6 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
135 struct cfctrl_request_info *req); 134 struct cfctrl_request_info *req);
136struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 135struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
137 struct cfctrl_request_info *req); 136 struct cfctrl_request_info *req);
137void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer);
138
138#endif /* CFCTRL_H_ */ 139#endif /* CFCTRL_H_ */
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index b2a12db20cd2..2dc9eb193ecf 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -9,14 +9,18 @@
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/stddef.h> 10#include <linux/stddef.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/kref.h>
13
12struct cfsrvl { 14struct cfsrvl {
13 struct cflayer layer; 15 struct cflayer layer;
14 bool open; 16 bool open;
15 bool phy_flow_on; 17 bool phy_flow_on;
16 bool modem_flow_on; 18 bool modem_flow_on;
17 struct dev_info dev_info; 19 struct dev_info dev_info;
20 struct kref ref;
18}; 21};
19 22
23void cfsrvl_release(struct kref *kref);
20struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info); 24struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
21struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info); 25struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
22struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info); 26struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
@@ -31,4 +35,22 @@ void cfsrvl_init(struct cfsrvl *service,
31bool cfsrvl_ready(struct cfsrvl *service, int *err); 35bool cfsrvl_ready(struct cfsrvl *service, int *err);
32u8 cfsrvl_getphyid(struct cflayer *layer); 36u8 cfsrvl_getphyid(struct cflayer *layer);
33 37
38static inline void cfsrvl_get(struct cflayer *layr)
39{
40 struct cfsrvl *s;
41 if (layr == NULL)
42 return;
43 s = container_of(layr, struct cfsrvl, layer);
44 kref_get(&s->ref);
45}
46
47static inline void cfsrvl_put(struct cflayer *layr)
48{
49 struct cfsrvl *s;
50 if (layr == NULL)
51 return;
52 s = container_of(layr, struct cfsrvl, layer);
53 kref_put(&s->ref, cfsrvl_release);
54}
55
34#endif /* CFSRVL_H_ */ 56#endif /* CFSRVL_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 37cebd3aa0f7..7d10c0182f53 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -511,6 +511,7 @@ struct mpath_info {
511 * @basic_rates: basic rates in IEEE 802.11 format 511 * @basic_rates: basic rates in IEEE 802.11 format
512 * (or NULL for no change) 512 * (or NULL for no change)
513 * @basic_rates_len: number of basic rates 513 * @basic_rates_len: number of basic rates
514 * @ap_isolate: do not forward packets between connected stations
514 */ 515 */
515struct bss_parameters { 516struct bss_parameters {
516 int use_cts_prot; 517 int use_cts_prot;
@@ -518,6 +519,7 @@ struct bss_parameters {
518 int use_short_slot_time; 519 int use_short_slot_time;
519 u8 *basic_rates; 520 u8 *basic_rates;
520 u8 basic_rates_len; 521 u8 basic_rates_len;
522 int ap_isolate;
521}; 523};
522 524
523struct mesh_config { 525struct mesh_config {
@@ -1018,6 +1020,8 @@ struct cfg80211_pmksa {
1018 * RSN IE. It allows for faster roaming between WPA2 BSSIDs. 1020 * RSN IE. It allows for faster roaming between WPA2 BSSIDs.
1019 * @del_pmksa: Delete a cached PMKID. 1021 * @del_pmksa: Delete a cached PMKID.
1020 * @flush_pmksa: Flush all cached PMKIDs. 1022 * @flush_pmksa: Flush all cached PMKIDs.
1023 * @set_power_mgmt: Configure WLAN power management. A timeout value of -1
1024 * allows the driver to adjust the dynamic ps timeout value.
1021 * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. 1025 * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
1022 * 1026 *
1023 */ 1027 */
@@ -1643,7 +1647,7 @@ struct ieee80211_radiotap_iterator {
1643 const struct ieee80211_radiotap_namespace *current_namespace; 1647 const struct ieee80211_radiotap_namespace *current_namespace;
1644 1648
1645 unsigned char *_arg, *_next_ns_data; 1649 unsigned char *_arg, *_next_ns_data;
1646 uint32_t *_next_bitmap; 1650 __le32 *_next_bitmap;
1647 1651
1648 unsigned char *this_arg; 1652 unsigned char *this_arg;
1649 int this_arg_index; 1653 int this_arg_index;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 52bd9e6c9141..e8923bc20f9f 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -104,7 +104,7 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
104 return frh->table; 104 return frh->table;
105} 105}
106 106
107extern struct fib_rules_ops *fib_rules_register(struct fib_rules_ops *, struct net *); 107extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
108extern void fib_rules_unregister(struct fib_rules_ops *); 108extern void fib_rules_unregister(struct fib_rules_ops *);
109extern void fib_rules_cleanup_ops(struct fib_rules_ops *); 109extern void fib_rules_cleanup_ops(struct fib_rules_ops *);
110 110
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b487bc1b99ab..1653de515cee 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -102,7 +102,6 @@ struct rtable;
102 * @uc_ttl - Unicast TTL 102 * @uc_ttl - Unicast TTL
103 * @inet_sport - Source port 103 * @inet_sport - Source port
104 * @inet_id - ID counter for DF pkts 104 * @inet_id - ID counter for DF pkts
105 * @rxhash - flow hash received from netif layer
106 * @tos - TOS 105 * @tos - TOS
107 * @mc_ttl - Multicasting TTL 106 * @mc_ttl - Multicasting TTL
108 * @is_icsk - is this an inet_connection_sock? 107 * @is_icsk - is this an inet_connection_sock?
@@ -126,9 +125,6 @@ struct inet_sock {
126 __u16 cmsg_flags; 125 __u16 cmsg_flags;
127 __be16 inet_sport; 126 __be16 inet_sport;
128 __u16 inet_id; 127 __u16 inet_id;
129#ifdef CONFIG_RPS
130 __u32 rxhash;
131#endif
132 128
133 struct ip_options *opt; 129 struct ip_options *opt;
134 __u8 tos; 130 __u8 tos;
@@ -224,37 +220,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
224 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0; 220 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
225} 221}
226 222
227static inline void inet_rps_record_flow(const struct sock *sk)
228{
229#ifdef CONFIG_RPS
230 struct rps_sock_flow_table *sock_flow_table;
231
232 rcu_read_lock();
233 sock_flow_table = rcu_dereference(rps_sock_flow_table);
234 rps_record_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
235 rcu_read_unlock();
236#endif
237}
238
239static inline void inet_rps_reset_flow(const struct sock *sk)
240{
241#ifdef CONFIG_RPS
242 struct rps_sock_flow_table *sock_flow_table;
243
244 rcu_read_lock();
245 sock_flow_table = rcu_dereference(rps_sock_flow_table);
246 rps_reset_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
247 rcu_read_unlock();
248#endif
249}
250
251static inline void inet_rps_save_rxhash(const struct sock *sk, u32 rxhash)
252{
253#ifdef CONFIG_RPS
254 if (unlikely(inet_sk(sk)->rxhash != rxhash)) {
255 inet_rps_reset_flow(sk);
256 inet_sk(sk)->rxhash = rxhash;
257 }
258#endif
259}
260#endif /* _INET_SOCK_H */ 223#endif /* _INET_SOCK_H */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 79f67eae8a7e..a066fdd50da6 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -224,7 +224,9 @@ static inline
224struct net *twsk_net(const struct inet_timewait_sock *twsk) 224struct net *twsk_net(const struct inet_timewait_sock *twsk)
225{ 225{
226#ifdef CONFIG_NET_NS 226#ifdef CONFIG_NET_NS
227 return rcu_dereference(twsk->tw_net); 227 return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
228 /* reference counting, */
229 /* initialization, or RCU. */
228#else 230#else
229 return &init_net; 231 return &init_net;
230#endif 232#endif
diff --git a/include/net/ip.h b/include/net/ip.h
index a84ceb692687..8149b77cea9b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -393,6 +393,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
393 * Functions provided by ip_sockglue.c 393 * Functions provided by ip_sockglue.c
394 */ 394 */
395 395
396extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
396extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); 397extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
397extern int ip_cmsg_send(struct net *net, 398extern int ip_cmsg_send(struct net *net,
398 struct msghdr *msg, struct ipcm_cookie *ipc); 399 struct msghdr *msg, struct ipcm_cookie *ipc);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index b1d8db90b214..eba5cc00325a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -503,7 +503,8 @@ extern int ip6_append_data(struct sock *sk,
503 struct ipv6_txoptions *opt, 503 struct ipv6_txoptions *opt,
504 struct flowi *fl, 504 struct flowi *fl,
505 struct rt6_info *rt, 505 struct rt6_info *rt,
506 unsigned int flags); 506 unsigned int flags,
507 int dontfrag);
507 508
508extern int ip6_push_pending_frames(struct sock *sk); 509extern int ip6_push_pending_frames(struct sock *sk);
509 510
@@ -577,9 +578,11 @@ extern int ip6_datagram_connect(struct sock *sk,
577 struct sockaddr *addr, int addr_len); 578 struct sockaddr *addr, int addr_len);
578 579
579extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); 580extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
581extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
580extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, 582extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
581 u32 info, u8 *payload); 583 u32 info, u8 *payload);
582extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info); 584extern void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
585extern void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu);
583 586
584extern int inet6_release(struct socket *sock); 587extern int inet6_release(struct socket *sock);
585extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, 588extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 344e5bf72062..54aa16b98b76 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -145,6 +145,7 @@ struct ieee80211_low_level_stats {
145 * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be 145 * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be
146 * enabled/disabled (beaconing modes) 146 * enabled/disabled (beaconing modes)
147 * @BSS_CHANGED_CQM: Connection quality monitor config changed 147 * @BSS_CHANGED_CQM: Connection quality monitor config changed
148 * @BSS_CHANGED_IBSS: IBSS join status changed
148 */ 149 */
149enum ieee80211_bss_change { 150enum ieee80211_bss_change {
150 BSS_CHANGED_ASSOC = 1<<0, 151 BSS_CHANGED_ASSOC = 1<<0,
@@ -158,6 +159,7 @@ enum ieee80211_bss_change {
158 BSS_CHANGED_BEACON = 1<<8, 159 BSS_CHANGED_BEACON = 1<<8,
159 BSS_CHANGED_BEACON_ENABLED = 1<<9, 160 BSS_CHANGED_BEACON_ENABLED = 1<<9,
160 BSS_CHANGED_CQM = 1<<10, 161 BSS_CHANGED_CQM = 1<<10,
162 BSS_CHANGED_IBSS = 1<<11,
161}; 163};
162 164
163/** 165/**
@@ -167,6 +169,8 @@ enum ieee80211_bss_change {
167 * to that BSS) that can change during the lifetime of the BSS. 169 * to that BSS) that can change during the lifetime of the BSS.
168 * 170 *
169 * @assoc: association status 171 * @assoc: association status
172 * @ibss_joined: indicates whether this station is part of an IBSS
173 * or not
170 * @aid: association ID number, valid only when @assoc is true 174 * @aid: association ID number, valid only when @assoc is true
171 * @use_cts_prot: use CTS protection 175 * @use_cts_prot: use CTS protection
172 * @use_short_preamble: use 802.11b short preamble; 176 * @use_short_preamble: use 802.11b short preamble;
@@ -194,7 +198,7 @@ enum ieee80211_bss_change {
194struct ieee80211_bss_conf { 198struct ieee80211_bss_conf {
195 const u8 *bssid; 199 const u8 *bssid;
196 /* association related data */ 200 /* association related data */
197 bool assoc; 201 bool assoc, ibss_joined;
198 u16 aid; 202 u16 aid;
199 /* erp related data */ 203 /* erp related data */
200 bool use_cts_prot; 204 bool use_cts_prot;
@@ -274,6 +278,9 @@ struct ieee80211_bss_conf {
274 * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211 278 * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211
275 * MLME command (internal to mac80211 to figure out whether to send TX 279 * MLME command (internal to mac80211 to figure out whether to send TX
276 * status to user space) 280 * status to user space)
281 * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
282 * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
283 * frame and selects the maximum number of streams that it can use.
277 */ 284 */
278enum mac80211_tx_control_flags { 285enum mac80211_tx_control_flags {
279 IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), 286 IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
@@ -297,6 +304,9 @@ enum mac80211_tx_control_flags {
297 IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19), 304 IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19),
298 IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20), 305 IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20),
299 IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21), 306 IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
307 IEEE80211_TX_CTL_LDPC = BIT(22),
308 IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
309#define IEEE80211_TX_CTL_STBC_SHIFT 23
300}; 310};
301 311
302/** 312/**
@@ -395,11 +405,11 @@ struct ieee80211_tx_rate {
395 * @status: union for status data 405 * @status: union for status data
396 * @driver_data: array of driver_data pointers 406 * @driver_data: array of driver_data pointers
397 * @ampdu_ack_len: number of acked aggregated frames. 407 * @ampdu_ack_len: number of acked aggregated frames.
398 * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 408 * relevant only if IEEE80211_TX_STAT_AMPDU was set.
399 * @ampdu_ack_map: block ack bit map for the aggregation. 409 * @ampdu_ack_map: block ack bit map for the aggregation.
400 * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 410 * relevant only if IEEE80211_TX_STAT_AMPDU was set.
401 * @ampdu_len: number of aggregated frames. 411 * @ampdu_len: number of aggregated frames.
402 * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 412 * relevant only if IEEE80211_TX_STAT_AMPDU was set.
403 * @ack_signal: signal strength of the ACK frame 413 * @ack_signal: signal strength of the ACK frame
404 */ 414 */
405struct ieee80211_tx_info { 415struct ieee80211_tx_info {
@@ -550,7 +560,6 @@ enum mac80211_rx_flags {
550 * @signal: signal strength when receiving this frame, either in dBm, in dB or 560 * @signal: signal strength when receiving this frame, either in dBm, in dB or
551 * unspecified depending on the hardware capabilities flags 561 * unspecified depending on the hardware capabilities flags
552 * @IEEE80211_HW_SIGNAL_* 562 * @IEEE80211_HW_SIGNAL_*
553 * @noise: noise when receiving this frame, in dBm (DEPRECATED).
554 * @antenna: antenna used 563 * @antenna: antenna used
555 * @rate_idx: index of data rate into band's supported rates or MCS index if 564 * @rate_idx: index of data rate into band's supported rates or MCS index if
556 * HT rates are use (RX_FLAG_HT) 565 * HT rates are use (RX_FLAG_HT)
@@ -561,7 +570,6 @@ struct ieee80211_rx_status {
561 enum ieee80211_band band; 570 enum ieee80211_band band;
562 int freq; 571 int freq;
563 int signal; 572 int signal;
564 int noise __deprecated;
565 int antenna; 573 int antenna;
566 int rate_idx; 574 int rate_idx;
567 int flag; 575 int flag;
@@ -610,6 +618,7 @@ enum ieee80211_conf_flags {
610 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed 618 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
611 * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed 619 * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
612 * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed 620 * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
621 * @IEEE80211_CONF_CHANGE_QOS: Quality of service was enabled or disabled
613 */ 622 */
614enum ieee80211_conf_changed { 623enum ieee80211_conf_changed {
615 IEEE80211_CONF_CHANGE_SMPS = BIT(1), 624 IEEE80211_CONF_CHANGE_SMPS = BIT(1),
@@ -661,6 +670,9 @@ enum ieee80211_smps_mode {
661 * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the 670 * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
662 * powersave documentation below. This variable is valid only when 671 * powersave documentation below. This variable is valid only when
663 * the CONF_PS flag is set. 672 * the CONF_PS flag is set.
673 * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
674 * by cfg80211 (essentially, wext) If set, this value overrules the value
675 * chosen by mac80211 based on ps qos network latency.
664 * 676 *
665 * @power_level: requested transmit power (in dBm) 677 * @power_level: requested transmit power (in dBm)
666 * 678 *
@@ -680,7 +692,7 @@ enum ieee80211_smps_mode {
680 */ 692 */
681struct ieee80211_conf { 693struct ieee80211_conf {
682 u32 flags; 694 u32 flags;
683 int power_level, dynamic_ps_timeout; 695 int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
684 int max_sleep_period; 696 int max_sleep_period;
685 697
686 u16 listen_interval; 698 u16 listen_interval;
@@ -791,6 +803,7 @@ struct ieee80211_key_conf {
791 u8 iv_len; 803 u8 iv_len;
792 u8 hw_key_idx; 804 u8 hw_key_idx;
793 u8 flags; 805 u8 flags;
806 u8 *ap_addr;
794 s8 keyidx; 807 s8 keyidx;
795 u8 keylen; 808 u8 keylen;
796 u8 key[0]; 809 u8 key[0];
@@ -919,10 +932,6 @@ enum ieee80211_tkip_key_type {
919 * one milliwatt. This is the preferred method since it is standardized 932 * one milliwatt. This is the preferred method since it is standardized
920 * between different devices. @max_signal does not need to be set. 933 * between different devices. @max_signal does not need to be set.
921 * 934 *
922 * @IEEE80211_HW_NOISE_DBM:
923 * Hardware can provide noise (radio interference) values in units dBm,
924 * decibel difference from one milliwatt.
925 *
926 * @IEEE80211_HW_SPECTRUM_MGMT: 935 * @IEEE80211_HW_SPECTRUM_MGMT:
927 * Hardware supports spectrum management defined in 802.11h 936 * Hardware supports spectrum management defined in 802.11h
928 * Measurement, Channel Switch, Quieting, TPC 937 * Measurement, Channel Switch, Quieting, TPC
@@ -986,7 +995,7 @@ enum ieee80211_hw_flags {
986 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, 995 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
987 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, 996 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
988 IEEE80211_HW_SIGNAL_DBM = 1<<6, 997 IEEE80211_HW_SIGNAL_DBM = 1<<6,
989 IEEE80211_HW_NOISE_DBM = 1<<7, 998 /* use this hole */
990 IEEE80211_HW_SPECTRUM_MGMT = 1<<8, 999 IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
991 IEEE80211_HW_AMPDU_AGGREGATION = 1<<9, 1000 IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
992 IEEE80211_HW_SUPPORTS_PS = 1<<10, 1001 IEEE80211_HW_SUPPORTS_PS = 1<<10,
@@ -1646,7 +1655,7 @@ struct ieee80211_ops {
1646 struct ieee80211_key_conf *conf, 1655 struct ieee80211_key_conf *conf,
1647 struct ieee80211_sta *sta, 1656 struct ieee80211_sta *sta,
1648 u32 iv32, u16 *phase1key); 1657 u32 iv32, u16 *phase1key);
1649 int (*hw_scan)(struct ieee80211_hw *hw, 1658 int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1650 struct cfg80211_scan_request *req); 1659 struct cfg80211_scan_request *req);
1651 void (*sw_scan_start)(struct ieee80211_hw *hw); 1660 void (*sw_scan_start)(struct ieee80211_hw *hw);
1652 void (*sw_scan_complete)(struct ieee80211_hw *hw); 1661 void (*sw_scan_complete)(struct ieee80211_hw *hw);
@@ -1671,7 +1680,8 @@ struct ieee80211_ops {
1671 struct ieee80211_vif *vif, 1680 struct ieee80211_vif *vif,
1672 enum ieee80211_ampdu_mlme_action action, 1681 enum ieee80211_ampdu_mlme_action action,
1673 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 1682 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
1674 1683 int (*get_survey)(struct ieee80211_hw *hw, int idx,
1684 struct survey_info *survey);
1675 void (*rfkill_poll)(struct ieee80211_hw *hw); 1685 void (*rfkill_poll)(struct ieee80211_hw *hw);
1676 void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); 1686 void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
1677#ifdef CONFIG_NL80211_TESTMODE 1687#ifdef CONFIG_NL80211_TESTMODE
diff --git a/include/net/mld.h b/include/net/mld.h
new file mode 100644
index 000000000000..467143cd4e2f
--- /dev/null
+++ b/include/net/mld.h
@@ -0,0 +1,75 @@
1#ifndef LINUX_MLD_H
2#define LINUX_MLD_H
3
4#include <linux/in6.h>
5#include <linux/icmpv6.h>
6
7/* MLDv1 Query/Report/Done */
8struct mld_msg {
9 struct icmp6hdr mld_hdr;
10 struct in6_addr mld_mca;
11};
12
13#define mld_type mld_hdr.icmp6_type
14#define mld_code mld_hdr.icmp6_code
15#define mld_cksum mld_hdr.icmp6_cksum
16#define mld_maxdelay mld_hdr.icmp6_maxdelay
17#define mld_reserved mld_hdr.icmp6_dataun.un_data16[1]
18
19/* Multicast Listener Discovery version 2 headers */
20/* MLDv2 Report */
21struct mld2_grec {
22 __u8 grec_type;
23 __u8 grec_auxwords;
24 __be16 grec_nsrcs;
25 struct in6_addr grec_mca;
26 struct in6_addr grec_src[0];
27};
28
29struct mld2_report {
30 struct icmp6hdr mld2r_hdr;
31 struct mld2_grec mld2r_grec[0];
32};
33
34#define mld2r_type mld2r_hdr.icmp6_type
35#define mld2r_resv1 mld2r_hdr.icmp6_code
36#define mld2r_cksum mld2r_hdr.icmp6_cksum
37#define mld2r_resv2 mld2r_hdr.icmp6_dataun.un_data16[0]
38#define mld2r_ngrec mld2r_hdr.icmp6_dataun.un_data16[1]
39
40/* MLDv2 Query */
41struct mld2_query {
42 struct icmp6hdr mld2q_hdr;
43 struct in6_addr mld2q_mca;
44#if defined(__LITTLE_ENDIAN_BITFIELD)
45 __u8 mld2q_qrv:3,
46 mld2q_suppress:1,
47 mld2q_resv2:4;
48#elif defined(__BIG_ENDIAN_BITFIELD)
49 __u8 mld2q_resv2:4,
50 mld2q_suppress:1,
51 mld2q_qrv:3;
52#else
53#error "Please fix <asm/byteorder.h>"
54#endif
55 __u8 mld2q_qqic;
56 __be16 mld2q_nsrcs;
57 struct in6_addr mld2q_srcs[0];
58};
59
60#define mld2q_type mld2q_hdr.icmp6_type
61#define mld2q_code mld2q_hdr.icmp6_code
62#define mld2q_cksum mld2q_hdr.icmp6_cksum
63#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
64#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
65
66/* Max Response Code */
67#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
68#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
69 ((value) < (thresh) ? (value) : \
70 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
71 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
72
73#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
74
75#endif
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index ff4982ab84b6..81a31c0db3e7 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -14,11 +14,8 @@
14 * The rules are simple: 14 * The rules are simple:
15 * 1. set pernet_operations->id. After register_pernet_device you 15 * 1. set pernet_operations->id. After register_pernet_device you
16 * will have the id of your private pointer. 16 * will have the id of your private pointer.
17 * 2. Either set pernet_operations->size (to have the code allocate and 17 * 2. set pernet_operations->size to have the code allocate and free
18 * free a private structure pointed to from struct net ) or 18 * a private structure pointed to from struct net.
19 * call net_assign_generic() to put the private data on the struct
20 * net (most preferably this should be done in the ->init callback
21 * of the ops registered);
22 * 3. do not change this pointer while the net is alive; 19 * 3. do not change this pointer while the net is alive;
23 * 4. do not try to have any private reference on the net_generic object. 20 * 4. do not try to have any private reference on the net_generic object.
24 * 21 *
@@ -46,6 +43,4 @@ static inline void *net_generic(struct net *net, int id)
46 43
47 return ptr; 44 return ptr;
48} 45}
49
50extern int net_assign_generic(struct net *net, int id, void *data);
51#endif 46#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index ae07feec6446..d68c3f121774 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -55,7 +55,6 @@ struct netns_ipv4 {
55 int sysctl_rt_cache_rebuild_count; 55 int sysctl_rt_cache_rebuild_count;
56 int current_rt_cache_rebuild_count; 56 int current_rt_cache_rebuild_count;
57 57
58 struct timer_list rt_secret_timer;
59 atomic_t rt_genid; 58 atomic_t rt_genid;
60 59
61#ifdef CONFIG_IP_MROUTE 60#ifdef CONFIG_IP_MROUTE
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 8be5135ff7aa..2c55a7ea20af 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
107 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ 107 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
108 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 108 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */
110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
110 SCTP_CMD_LAST 111 SCTP_CMD_LAST
111} sctp_verb_t; 112} sctp_verb_t;
112 113
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 59151557406c..65946bc43d00 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
128int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 128int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
129int sctp_inet_listen(struct socket *sock, int backlog); 129int sctp_inet_listen(struct socket *sock, int backlog);
130void sctp_write_space(struct sock *sk); 130void sctp_write_space(struct sock *sk);
131void sctp_data_ready(struct sock *sk, int len);
131unsigned int sctp_poll(struct file *file, struct socket *sock, 132unsigned int sctp_poll(struct file *file, struct socket *sock,
132 poll_table *wait); 133 poll_table *wait);
133void sctp_sock_rfree(struct sk_buff *skb); 134void sctp_sock_rfree(struct sk_buff *skb);
@@ -546,7 +547,7 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
546#define WORD_ROUND(s) (((s)+3)&~3) 547#define WORD_ROUND(s) (((s)+3)&~3)
547 548
548/* Make a new instance of type. */ 549/* Make a new instance of type. */
549#define t_new(type, flags) (type *)kmalloc(sizeof(type), flags) 550#define t_new(type, flags) (type *)kzalloc(sizeof(type), flags)
550 551
551/* Compare two timevals. */ 552/* Compare two timevals. */
552#define tv_lt(s, t) \ 553#define tv_lt(s, t) \
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 851c813adb3a..273a8bb683e3 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -437,7 +437,7 @@ sctp_vtag_verify_either(const struct sctp_chunk *chunk,
437 */ 437 */
438 if ((!sctp_test_T_bit(chunk) && 438 if ((!sctp_test_T_bit(chunk) &&
439 (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) || 439 (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) ||
440 (sctp_test_T_bit(chunk) && 440 (sctp_test_T_bit(chunk) && asoc->c.peer_vtag &&
441 (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) { 441 (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) {
442 return 1; 442 return 1;
443 } 443 }
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ff3017744711..43257b903c82 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -643,17 +643,15 @@ struct sctp_pf {
643struct sctp_datamsg { 643struct sctp_datamsg {
644 /* Chunks waiting to be submitted to lower layer. */ 644 /* Chunks waiting to be submitted to lower layer. */
645 struct list_head chunks; 645 struct list_head chunks;
646 /* Chunks that have been transmitted. */
647 size_t msg_size;
648 /* Reference counting. */ 646 /* Reference counting. */
649 atomic_t refcnt; 647 atomic_t refcnt;
650 /* When is this message no longer interesting to the peer? */ 648 /* When is this message no longer interesting to the peer? */
651 unsigned long expires_at; 649 unsigned long expires_at;
652 /* Did the messenge fail to send? */ 650 /* Did the messenge fail to send? */
653 int send_error; 651 int send_error;
654 char send_failed; 652 u8 send_failed:1,
655 /* Control whether chunks from this message can be abandoned. */ 653 can_abandon:1, /* can chunks from this message can be abandoned. */
656 char can_abandon; 654 can_delay; /* should this message be Nagle delayed */
657}; 655};
658 656
659struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, 657struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
@@ -757,7 +755,6 @@ struct sctp_chunk {
757#define SCTP_NEED_FRTX 0x1 755#define SCTP_NEED_FRTX 0x1
758#define SCTP_DONT_FRTX 0x2 756#define SCTP_DONT_FRTX 0x2
759 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ 757 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
760 resent:1, /* Has this chunk ever been resent. */
761 has_tsn:1, /* Does this chunk have a TSN yet? */ 758 has_tsn:1, /* Does this chunk have a TSN yet? */
762 has_ssn:1, /* Does this chunk have a SSN yet? */ 759 has_ssn:1, /* Does this chunk have a SSN yet? */
763 singleton:1, /* Only chunk in the packet? */ 760 singleton:1, /* Only chunk in the packet? */
@@ -778,6 +775,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
778 struct iovec *data); 775 struct iovec *data);
779void sctp_chunk_free(struct sctp_chunk *); 776void sctp_chunk_free(struct sctp_chunk *);
780void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); 777void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
778void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
781struct sctp_chunk *sctp_chunkify(struct sk_buff *, 779struct sctp_chunk *sctp_chunkify(struct sk_buff *,
782 const struct sctp_association *, 780 const struct sctp_association *,
783 struct sock *); 781 struct sock *);
@@ -878,7 +876,30 @@ struct sctp_transport {
878 876
879 /* Reference counting. */ 877 /* Reference counting. */
880 atomic_t refcnt; 878 atomic_t refcnt;
881 int dead; 879 int dead:1,
880 /* RTO-Pending : A flag used to track if one of the DATA
881 * chunks sent to this address is currently being
882 * used to compute a RTT. If this flag is 0,
883 * the next DATA chunk sent to this destination
884 * should be used to compute a RTT and this flag
885 * should be set. Every time the RTT
886 * calculation completes (i.e. the DATA chunk
887 * is SACK'd) clear this flag.
888 */
889 rto_pending:1,
890
891 /*
892 * hb_sent : a flag that signals that we have a pending
893 * heartbeat.
894 */
895 hb_sent:1,
896
897 /* Is the Path MTU update pending on this tranport */
898 pmtu_pending:1,
899
900 /* Is this structure kfree()able? */
901 malloced:1;
902
882 903
883 /* This is the peer's IP address and port. */ 904 /* This is the peer's IP address and port. */
884 union sctp_addr ipaddr; 905 union sctp_addr ipaddr;
@@ -908,22 +929,6 @@ struct sctp_transport {
908 /* SRTT : The current smoothed round trip time. */ 929 /* SRTT : The current smoothed round trip time. */
909 __u32 srtt; 930 __u32 srtt;
910 931
911 /* RTO-Pending : A flag used to track if one of the DATA
912 * chunks sent to this address is currently being
913 * used to compute a RTT. If this flag is 0,
914 * the next DATA chunk sent to this destination
915 * should be used to compute a RTT and this flag
916 * should be set. Every time the RTT
917 * calculation completes (i.e. the DATA chunk
918 * is SACK'd) clear this flag.
919 * hb_sent : a flag that signals that we have a pending heartbeat.
920 */
921 __u8 rto_pending;
922 __u8 hb_sent;
923
924 /* Flag to track the current fast recovery state */
925 __u8 fast_recovery;
926
927 /* 932 /*
928 * These are the congestion stats. 933 * These are the congestion stats.
929 */ 934 */
@@ -943,9 +948,6 @@ struct sctp_transport {
943 948
944 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ 949 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */
945 950
946 /* TSN marking the fast recovery exit point */
947 __u32 fast_recovery_exit;
948
949 /* Destination */ 951 /* Destination */
950 struct dst_entry *dst; 952 struct dst_entry *dst;
951 /* Source address. */ 953 /* Source address. */
@@ -976,9 +978,6 @@ struct sctp_transport {
976 */ 978 */
977 __u16 pathmaxrxt; 979 __u16 pathmaxrxt;
978 980
979 /* is the Path MTU update pending on this tranport */
980 __u8 pmtu_pending;
981
982 /* PMTU : The current known path MTU. */ 981 /* PMTU : The current known path MTU. */
983 __u32 pathmtu; 982 __u32 pathmtu;
984 983
@@ -1022,8 +1021,6 @@ struct sctp_transport {
1022 /* This is the list of transports that have chunks to send. */ 1021 /* This is the list of transports that have chunks to send. */
1023 struct list_head send_ready; 1022 struct list_head send_ready;
1024 1023
1025 int malloced; /* Is this structure kfree()able? */
1026
1027 /* State information saved for SFR_CACC algorithm. The key 1024 /* State information saved for SFR_CACC algorithm. The key
1028 * idea in SFR_CACC is to maintain state at the sender on a 1025 * idea in SFR_CACC is to maintain state at the sender on a
1029 * per-destination basis when a changeover happens. 1026 * per-destination basis when a changeover happens.
@@ -1065,7 +1062,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
1065 struct sctp_sock *); 1062 struct sctp_sock *);
1066void sctp_transport_pmtu(struct sctp_transport *); 1063void sctp_transport_pmtu(struct sctp_transport *);
1067void sctp_transport_free(struct sctp_transport *); 1064void sctp_transport_free(struct sctp_transport *);
1068void sctp_transport_reset_timers(struct sctp_transport *, int); 1065void sctp_transport_reset_timers(struct sctp_transport *);
1069void sctp_transport_hold(struct sctp_transport *); 1066void sctp_transport_hold(struct sctp_transport *);
1070void sctp_transport_put(struct sctp_transport *); 1067void sctp_transport_put(struct sctp_transport *);
1071void sctp_transport_update_rto(struct sctp_transport *, __u32); 1068void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1719,6 +1716,12 @@ struct sctp_association {
1719 /* Highest TSN that is acknowledged by incoming SACKs. */ 1716 /* Highest TSN that is acknowledged by incoming SACKs. */
1720 __u32 highest_sacked; 1717 __u32 highest_sacked;
1721 1718
1719 /* TSN marking the fast recovery exit point */
1720 __u32 fast_recovery_exit;
1721
1722 /* Flag to track the current fast recovery state */
1723 __u8 fast_recovery;
1724
1722 /* The number of unacknowledged data chunks. Reported through 1725 /* The number of unacknowledged data chunks. Reported through
1723 * the SCTP_STATUS sockopt. 1726 * the SCTP_STATUS sockopt.
1724 */ 1727 */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 884fdbb74b23..92456f1035f5 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -133,6 +133,8 @@ struct linux_xfrm_mib {
133 __this_cpu_add(mib[0]->mibs[field], addend) 133 __this_cpu_add(mib[0]->mibs[field], addend)
134#define SNMP_ADD_STATS_USER(mib, field, addend) \ 134#define SNMP_ADD_STATS_USER(mib, field, addend) \
135 this_cpu_add(mib[1]->mibs[field], addend) 135 this_cpu_add(mib[1]->mibs[field], addend)
136#define SNMP_ADD_STATS(mib, field, addend) \
137 this_cpu_add(mib[0]->mibs[field], addend)
136/* 138/*
137 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr" 139 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
138 * to make @ptr a non-percpu pointer. 140 * to make @ptr a non-percpu pointer.
diff --git a/include/net/sock.h b/include/net/sock.h
index 56df440a950b..328e03f47dd1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -74,7 +74,7 @@
74 printk(KERN_DEBUG msg); } while (0) 74 printk(KERN_DEBUG msg); } while (0)
75#else 75#else
76/* Validate arguments and do nothing */ 76/* Validate arguments and do nothing */
77static void inline int __attribute__ ((format (printf, 2, 3))) 77static inline void __attribute__ ((format (printf, 2, 3)))
78SOCK_DEBUG(struct sock *sk, const char *msg, ...) 78SOCK_DEBUG(struct sock *sk, const char *msg, ...)
79{ 79{
80} 80}
@@ -159,7 +159,7 @@ struct sock_common {
159 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings 159 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
160 * @sk_lock: synchronizer 160 * @sk_lock: synchronizer
161 * @sk_rcvbuf: size of receive buffer in bytes 161 * @sk_rcvbuf: size of receive buffer in bytes
162 * @sk_sleep: sock wait queue 162 * @sk_wq: sock wait queue and async head
163 * @sk_dst_cache: destination cache 163 * @sk_dst_cache: destination cache
164 * @sk_dst_lock: destination cache lock 164 * @sk_dst_lock: destination cache lock
165 * @sk_policy: flow policy 165 * @sk_policy: flow policy
@@ -198,6 +198,7 @@ struct sock_common {
198 * @sk_rcvlowat: %SO_RCVLOWAT setting 198 * @sk_rcvlowat: %SO_RCVLOWAT setting
199 * @sk_rcvtimeo: %SO_RCVTIMEO setting 199 * @sk_rcvtimeo: %SO_RCVTIMEO setting
200 * @sk_sndtimeo: %SO_SNDTIMEO setting 200 * @sk_sndtimeo: %SO_SNDTIMEO setting
201 * @sk_rxhash: flow hash received from netif layer
201 * @sk_filter: socket filtering instructions 202 * @sk_filter: socket filtering instructions
202 * @sk_protinfo: private area, net family specific, when not using slab 203 * @sk_protinfo: private area, net family specific, when not using slab
203 * @sk_timer: sock cleanup timer 204 * @sk_timer: sock cleanup timer
@@ -255,9 +256,8 @@ struct sock {
255 struct sk_buff *head; 256 struct sk_buff *head;
256 struct sk_buff *tail; 257 struct sk_buff *tail;
257 int len; 258 int len;
258 int limit;
259 } sk_backlog; 259 } sk_backlog;
260 wait_queue_head_t *sk_sleep; 260 struct socket_wq *sk_wq;
261 struct dst_entry *sk_dst_cache; 261 struct dst_entry *sk_dst_cache;
262#ifdef CONFIG_XFRM 262#ifdef CONFIG_XFRM
263 struct xfrm_policy *sk_policy[2]; 263 struct xfrm_policy *sk_policy[2];
@@ -279,6 +279,9 @@ struct sock {
279 int sk_gso_type; 279 int sk_gso_type;
280 unsigned int sk_gso_max_size; 280 unsigned int sk_gso_max_size;
281 int sk_rcvlowat; 281 int sk_rcvlowat;
282#ifdef CONFIG_RPS
283 __u32 sk_rxhash;
284#endif
282 unsigned long sk_flags; 285 unsigned long sk_flags;
283 unsigned long sk_lingertime; 286 unsigned long sk_lingertime;
284 struct sk_buff_head sk_error_queue; 287 struct sk_buff_head sk_error_queue;
@@ -604,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
604 skb->next = NULL; 607 skb->next = NULL;
605} 608}
606 609
610/*
611 * Take into account size of receive queue and backlog queue
612 */
613static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
614{
615 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
616
617 return qsize + skb->truesize > sk->sk_rcvbuf;
618}
619
607/* The per-socket spinlock must be held here. */ 620/* The per-socket spinlock must be held here. */
608static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb) 621static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
609{ 622{
610 if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1)) 623 if (sk_rcvqueues_full(sk, skb))
611 return -ENOBUFS; 624 return -ENOBUFS;
612 625
613 __sk_add_backlog(sk, skb); 626 __sk_add_backlog(sk, skb);
@@ -620,6 +633,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
620 return sk->sk_backlog_rcv(sk, skb); 633 return sk->sk_backlog_rcv(sk, skb);
621} 634}
622 635
636static inline void sock_rps_record_flow(const struct sock *sk)
637{
638#ifdef CONFIG_RPS
639 struct rps_sock_flow_table *sock_flow_table;
640
641 rcu_read_lock();
642 sock_flow_table = rcu_dereference(rps_sock_flow_table);
643 rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
644 rcu_read_unlock();
645#endif
646}
647
648static inline void sock_rps_reset_flow(const struct sock *sk)
649{
650#ifdef CONFIG_RPS
651 struct rps_sock_flow_table *sock_flow_table;
652
653 rcu_read_lock();
654 sock_flow_table = rcu_dereference(rps_sock_flow_table);
655 rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
656 rcu_read_unlock();
657#endif
658}
659
660static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
661{
662#ifdef CONFIG_RPS
663 if (unlikely(sk->sk_rxhash != rxhash)) {
664 sock_rps_reset_flow(sk);
665 sk->sk_rxhash = rxhash;
666 }
667#endif
668}
669
623#define sk_wait_event(__sk, __timeo, __condition) \ 670#define sk_wait_event(__sk, __timeo, __condition) \
624 ({ int __rc; \ 671 ({ int __rc; \
625 release_sock(__sk); \ 672 release_sock(__sk); \
@@ -974,6 +1021,16 @@ extern void release_sock(struct sock *sk);
974 SINGLE_DEPTH_NESTING) 1021 SINGLE_DEPTH_NESTING)
975#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 1022#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
976 1023
1024static inline void lock_sock_bh(struct sock *sk)
1025{
1026 spin_lock_bh(&sk->sk_lock.slock);
1027}
1028
1029static inline void unlock_sock_bh(struct sock *sk)
1030{
1031 spin_unlock_bh(&sk->sk_lock.slock);
1032}
1033
977extern struct sock *sk_alloc(struct net *net, int family, 1034extern struct sock *sk_alloc(struct net *net, int family,
978 gfp_t priority, 1035 gfp_t priority,
979 struct proto *prot); 1036 struct proto *prot);
@@ -1160,6 +1217,10 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1160 sk->sk_socket = sock; 1217 sk->sk_socket = sock;
1161} 1218}
1162 1219
1220static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1221{
1222 return &sk->sk_wq->wait;
1223}
1163/* Detach socket from process context. 1224/* Detach socket from process context.
1164 * Announce socket dead, detach it from wait queue and inode. 1225 * Announce socket dead, detach it from wait queue and inode.
1165 * Note that parent inode held reference count on this struct sock, 1226 * Note that parent inode held reference count on this struct sock,
@@ -1172,14 +1233,14 @@ static inline void sock_orphan(struct sock *sk)
1172 write_lock_bh(&sk->sk_callback_lock); 1233 write_lock_bh(&sk->sk_callback_lock);
1173 sock_set_flag(sk, SOCK_DEAD); 1234 sock_set_flag(sk, SOCK_DEAD);
1174 sk_set_socket(sk, NULL); 1235 sk_set_socket(sk, NULL);
1175 sk->sk_sleep = NULL; 1236 sk->sk_wq = NULL;
1176 write_unlock_bh(&sk->sk_callback_lock); 1237 write_unlock_bh(&sk->sk_callback_lock);
1177} 1238}
1178 1239
1179static inline void sock_graft(struct sock *sk, struct socket *parent) 1240static inline void sock_graft(struct sock *sk, struct socket *parent)
1180{ 1241{
1181 write_lock_bh(&sk->sk_callback_lock); 1242 write_lock_bh(&sk->sk_callback_lock);
1182 sk->sk_sleep = &parent->wait; 1243 rcu_assign_pointer(sk->sk_wq, parent->wq);
1183 parent->sk = sk; 1244 parent->sk = sk;
1184 sk_set_socket(sk, parent); 1245 sk_set_socket(sk, parent);
1185 security_sock_graft(sk, parent); 1246 security_sock_graft(sk, parent);
@@ -1193,7 +1254,8 @@ static inline struct dst_entry *
1193__sk_dst_get(struct sock *sk) 1254__sk_dst_get(struct sock *sk)
1194{ 1255{
1195 return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() || 1256 return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
1196 sock_owned_by_user(sk)); 1257 sock_owned_by_user(sk) ||
1258 lockdep_is_held(&sk->sk_lock.slock));
1197} 1259}
1198 1260
1199static inline struct dst_entry * 1261static inline struct dst_entry *
@@ -1231,8 +1293,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1231 struct dst_entry *old_dst; 1293 struct dst_entry *old_dst;
1232 1294
1233 sk_tx_queue_clear(sk); 1295 sk_tx_queue_clear(sk);
1234 old_dst = rcu_dereference_check(sk->sk_dst_cache, 1296 /*
1235 lockdep_is_held(&sk->sk_dst_lock)); 1297 * This can be called while sk is owned by the caller only,
1298 * with no state that can be checked in a rcu_dereference_check() cond
1299 */
1300 old_dst = rcu_dereference_raw(sk->sk_dst_cache);
1236 rcu_assign_pointer(sk->sk_dst_cache, dst); 1301 rcu_assign_pointer(sk->sk_dst_cache, dst);
1237 dst_release(old_dst); 1302 dst_release(old_dst);
1238} 1303}
@@ -1327,12 +1392,12 @@ static inline int sk_has_allocations(const struct sock *sk)
1327} 1392}
1328 1393
1329/** 1394/**
1330 * sk_has_sleeper - check if there are any waiting processes 1395 * wq_has_sleeper - check if there are any waiting processes
1331 * @sk: socket 1396 * @sk: struct socket_wq
1332 * 1397 *
1333 * Returns true if socket has waiting processes 1398 * Returns true if socket_wq has waiting processes
1334 * 1399 *
1335 * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory 1400 * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
1336 * barrier call. They were added due to the race found within the tcp code. 1401 * barrier call. They were added due to the race found within the tcp code.
1337 * 1402 *
1338 * Consider following tcp code paths: 1403 * Consider following tcp code paths:
@@ -1345,9 +1410,10 @@ static inline int sk_has_allocations(const struct sock *sk)
1345 * ... ... 1410 * ... ...
1346 * tp->rcv_nxt check sock_def_readable 1411 * tp->rcv_nxt check sock_def_readable
1347 * ... { 1412 * ... {
1348 * schedule ... 1413 * schedule rcu_read_lock();
1349 * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 1414 * wq = rcu_dereference(sk->sk_wq);
1350 * wake_up_interruptible(sk->sk_sleep) 1415 * if (wq && waitqueue_active(&wq->wait))
1416 * wake_up_interruptible(&wq->wait)
1351 * ... 1417 * ...
1352 * } 1418 * }
1353 * 1419 *
@@ -1356,19 +1422,18 @@ static inline int sk_has_allocations(const struct sock *sk)
1356 * could then endup calling schedule and sleep forever if there are no more 1422 * could then endup calling schedule and sleep forever if there are no more
1357 * data on the socket. 1423 * data on the socket.
1358 * 1424 *
1359 * The sk_has_sleeper is always called right after a call to read_lock, so we
1360 * can use smp_mb__after_lock barrier.
1361 */ 1425 */
1362static inline int sk_has_sleeper(struct sock *sk) 1426static inline bool wq_has_sleeper(struct socket_wq *wq)
1363{ 1427{
1428
1364 /* 1429 /*
1365 * We need to be sure we are in sync with the 1430 * We need to be sure we are in sync with the
1366 * add_wait_queue modifications to the wait queue. 1431 * add_wait_queue modifications to the wait queue.
1367 * 1432 *
1368 * This memory barrier is paired in the sock_poll_wait. 1433 * This memory barrier is paired in the sock_poll_wait.
1369 */ 1434 */
1370 smp_mb__after_lock(); 1435 smp_mb();
1371 return sk->sk_sleep && waitqueue_active(sk->sk_sleep); 1436 return wq && waitqueue_active(&wq->wait);
1372} 1437}
1373 1438
1374/** 1439/**
@@ -1377,7 +1442,7 @@ static inline int sk_has_sleeper(struct sock *sk)
1377 * @wait_address: socket wait queue 1442 * @wait_address: socket wait queue
1378 * @p: poll_table 1443 * @p: poll_table
1379 * 1444 *
1380 * See the comments in the sk_has_sleeper function. 1445 * See the comments in the wq_has_sleeper function.
1381 */ 1446 */
1382static inline void sock_poll_wait(struct file *filp, 1447static inline void sock_poll_wait(struct file *filp,
1383 wait_queue_head_t *wait_address, poll_table *p) 1448 wait_queue_head_t *wait_address, poll_table *p)
@@ -1388,7 +1453,7 @@ static inline void sock_poll_wait(struct file *filp,
1388 * We need to be sure we are in sync with the 1453 * We need to be sure we are in sync with the
1389 * socket flags modification. 1454 * socket flags modification.
1390 * 1455 *
1391 * This memory barrier is paired in the sk_has_sleeper. 1456 * This memory barrier is paired in the wq_has_sleeper.
1392 */ 1457 */
1393 smp_mb(); 1458 smp_mb();
1394 } 1459 }
@@ -1570,7 +1635,24 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1570 sk->sk_stamp = kt; 1635 sk->sk_stamp = kt;
1571} 1636}
1572 1637
1573extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); 1638extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
1639 struct sk_buff *skb);
1640
1641static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
1642 struct sk_buff *skb)
1643{
1644#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
1645 (1UL << SOCK_RCVTSTAMP) | \
1646 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
1647 (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
1648 (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
1649 (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
1650
1651 if (sk->sk_flags & FLAGS_TS_OR_DROPS)
1652 __sock_recv_ts_and_drops(msg, sk, skb);
1653 else
1654 sk->sk_stamp = skb->tstamp;
1655}
1574 1656
1575/** 1657/**
1576 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 1658 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70c5159f4b36..fb5c66b2ab81 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -294,6 +294,7 @@ extern struct proto tcp_prot;
294#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field) 294#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
295#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) 295#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
296#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val) 296#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
297#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
297 298
298extern void tcp_v4_err(struct sk_buff *skb, u32); 299extern void tcp_v4_err(struct sk_buff *skb, u32);
299 300
@@ -939,7 +940,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
939 940
940 tp->ucopy.memory = 0; 941 tp->ucopy.memory = 0;
941 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { 942 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
942 wake_up_interruptible_sync_poll(sk->sk_sleep, 943 wake_up_interruptible_sync_poll(sk_sleep(sk),
943 POLLIN | POLLRDNORM | POLLRDBAND); 944 POLLIN | POLLRDNORM | POLLRDBAND);
944 if (!inet_csk_ack_scheduled(sk)) 945 if (!inet_csk_ack_scheduled(sk))
945 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 946 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -1032,6 +1033,14 @@ static inline int keepalive_probes(const struct tcp_sock *tp)
1032 return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; 1033 return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1033} 1034}
1034 1035
1036static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1037{
1038 const struct inet_connection_sock *icsk = &tp->inet_conn;
1039
1040 return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1041 tcp_time_stamp - tp->rcv_tstamp);
1042}
1043
1035static inline int tcp_fin_time(const struct sock *sk) 1044static inline int tcp_fin_time(const struct sock *sk)
1036{ 1045{
1037 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout; 1046 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index d65381cad0fc..42a0eb68b7b6 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -44,7 +44,8 @@ extern int datagram_send_ctl(struct net *net,
44 struct msghdr *msg, 44 struct msghdr *msg,
45 struct flowi *fl, 45 struct flowi *fl,
46 struct ipv6_txoptions *opt, 46 struct ipv6_txoptions *opt,
47 int *hlimit, int *tclass); 47 int *hlimit, int *tclass,
48 int *dontfrag);
48 49
49#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) 50#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
50 51
diff --git a/include/net/x25device.h b/include/net/x25device.h
index 1415bcf93980..1fa08b49f1c2 100644
--- a/include/net/x25device.h
+++ b/include/net/x25device.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/if_ether.h> 4#include <linux/if_ether.h>
5#include <linux/if_packet.h> 5#include <linux/if_packet.h>
6#include <linux/if_x25.h>
6#include <linux/skbuff.h> 7#include <linux/skbuff.h>
7 8
8static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev) 9static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4d2289626a84..a8c96212bc1b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
420 * User space encodes device types as two-byte values, 420 * User space encodes device types as two-byte values,
421 * so we need to recode them 421 * so we need to recode them
422 */ 422 */
423 swdev = old_decode_dev(swap_area.dev); 423 swdev = new_decode_dev(swap_area.dev);
424 if (swdev) { 424 if (swdev) {
425 offset = swap_area.offset; 425 offset = swap_area.offset;
426 data->swap = swap_type_of(swdev, offset, NULL); 426 data->swap = swap_type_of(swdev, offset, NULL);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 63fe25433980..03a7ea1579f6 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
69 69
70#ifdef CONFIG_DEBUG_LOCK_ALLOC 70#ifdef CONFIG_DEBUG_LOCK_ALLOC
71 71
72int debug_lockdep_rcu_enabled(void)
73{
74 return rcu_scheduler_active && debug_locks &&
75 current->lockdep_recursion == 0;
76}
77EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
78
72/** 79/**
73 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? 80 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
74 * 81 *
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 59030570f5ca..937d31dc8566 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -224,7 +224,6 @@ static const struct bin_table bin_net_ipv4_route_table[] = {
224 { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" }, 224 { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
225 { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" }, 225 { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
226 { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" }, 226 { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
227 { CTL_INT, NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
228 {} 227 {}
229}; 228};
230 229
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff017108700d..935248bdbc47 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config SLUB_STATS
356config DEBUG_KMEMLEAK 356config DEBUG_KMEMLEAK
357 bool "Kernel memory leak detector" 357 bool "Kernel memory leak detector"
358 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ 358 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
359 (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE) 359 (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
360 360
361 select DEBUG_FS if SYSFS 361 select DEBUG_FS if SYSFS
362 select STACKTRACE if STACKTRACE_SUPPORT 362 select STACKTRACE if STACKTRACE_SUPPORT
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ba8b67039d13..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
570 * Now parse out the first token and use it as the name for the 570 * Now parse out the first token and use it as the name for the
571 * driver to filter for. 571 * driver to filter for.
572 */ 572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) { 573 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
574 current_driver_name[i] = buf[i]; 574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break; 576 break;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24112e5a5780..7376b7c55ffe 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -408,12 +408,12 @@ enum format_type {
408}; 408};
409 409
410struct printf_spec { 410struct printf_spec {
411 u16 type; 411 u8 type; /* format_type enum */
412 s16 field_width; /* width of output field */
413 u8 flags; /* flags to number() */ 412 u8 flags; /* flags to number() */
414 u8 base; 413 u8 base; /* number base, 8, 10 or 16 only */
415 s8 precision; /* # of digits/chars */ 414 u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
416 u8 qualifier; 415 s16 field_width; /* width of output field */
416 s16 precision; /* # of digits/chars */
417}; 417};
418 418
419static char *number(char *buf, char *end, unsigned long long num, 419static char *number(char *buf, char *end, unsigned long long num,
diff --git a/mm/mmap.c b/mm/mmap.c
index 75557c639ad4..f90ea92f755a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
507 struct address_space *mapping = NULL; 507 struct address_space *mapping = NULL;
508 struct prio_tree_root *root = NULL; 508 struct prio_tree_root *root = NULL;
509 struct file *file = vma->vm_file; 509 struct file *file = vma->vm_file;
510 struct anon_vma *anon_vma = NULL;
511 long adjust_next = 0; 510 long adjust_next = 0;
512 int remove_next = 0; 511 int remove_next = 0;
513 512
514 if (next && !insert) { 513 if (next && !insert) {
514 struct vm_area_struct *exporter = NULL;
515
515 if (end >= next->vm_end) { 516 if (end >= next->vm_end) {
516 /* 517 /*
517 * vma expands, overlapping all the next, and 518 * vma expands, overlapping all the next, and
@@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
519 */ 520 */
520again: remove_next = 1 + (end > next->vm_end); 521again: remove_next = 1 + (end > next->vm_end);
521 end = next->vm_end; 522 end = next->vm_end;
522 anon_vma = next->anon_vma; 523 exporter = next;
523 importer = vma; 524 importer = vma;
524 } else if (end > next->vm_start) { 525 } else if (end > next->vm_start) {
525 /* 526 /*
@@ -527,7 +528,7 @@ again: remove_next = 1 + (end > next->vm_end);
527 * mprotect case 5 shifting the boundary up. 528 * mprotect case 5 shifting the boundary up.
528 */ 529 */
529 adjust_next = (end - next->vm_start) >> PAGE_SHIFT; 530 adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
530 anon_vma = next->anon_vma; 531 exporter = next;
531 importer = vma; 532 importer = vma;
532 } else if (end < vma->vm_end) { 533 } else if (end < vma->vm_end) {
533 /* 534 /*
@@ -536,28 +537,19 @@ again: remove_next = 1 + (end > next->vm_end);
536 * mprotect case 4 shifting the boundary down. 537 * mprotect case 4 shifting the boundary down.
537 */ 538 */
538 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); 539 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
539 anon_vma = next->anon_vma; 540 exporter = vma;
540 importer = next; 541 importer = next;
541 } 542 }
542 }
543 543
544 /*
545 * When changing only vma->vm_end, we don't really need anon_vma lock.
546 */
547 if (vma->anon_vma && (insert || importer || start != vma->vm_start))
548 anon_vma = vma->anon_vma;
549 if (anon_vma) {
550 /* 544 /*
551 * Easily overlooked: when mprotect shifts the boundary, 545 * Easily overlooked: when mprotect shifts the boundary,
552 * make sure the expanding vma has anon_vma set if the 546 * make sure the expanding vma has anon_vma set if the
553 * shrinking vma had, to cover any anon pages imported. 547 * shrinking vma had, to cover any anon pages imported.
554 */ 548 */
555 if (importer && !importer->anon_vma) { 549 if (exporter && exporter->anon_vma && !importer->anon_vma) {
556 /* Block reverse map lookups until things are set up. */ 550 if (anon_vma_clone(importer, exporter))
557 if (anon_vma_clone(importer, vma)) {
558 return -ENOMEM; 551 return -ENOMEM;
559 } 552 importer->anon_vma = exporter->anon_vma;
560 importer->anon_vma = anon_vma;
561 } 553 }
562 } 554 }
563 555
@@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
825} 817}
826 818
827/* 819/*
820 * Rough compatbility check to quickly see if it's even worth looking
821 * at sharing an anon_vma.
822 *
823 * They need to have the same vm_file, and the flags can only differ
824 * in things that mprotect may change.
825 *
826 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
827 * we can merge the two vma's. For example, we refuse to merge a vma if
828 * there is a vm_ops->close() function, because that indicates that the
829 * driver is doing some kind of reference counting. But that doesn't
830 * really matter for the anon_vma sharing case.
831 */
832static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
833{
834 return a->vm_end == b->vm_start &&
835 mpol_equal(vma_policy(a), vma_policy(b)) &&
836 a->vm_file == b->vm_file &&
837 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
838 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
839}
840
841/*
842 * Do some basic sanity checking to see if we can re-use the anon_vma
843 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
844 * the same as 'old', the other will be the new one that is trying
845 * to share the anon_vma.
846 *
847 * NOTE! This runs with mm_sem held for reading, so it is possible that
848 * the anon_vma of 'old' is concurrently in the process of being set up
849 * by another page fault trying to merge _that_. But that's ok: if it
850 * is being set up, that automatically means that it will be a singleton
851 * acceptable for merging, so we can do all of this optimistically. But
852 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
853 *
854 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
855 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
856 * is to return an anon_vma that is "complex" due to having gone through
857 * a fork).
858 *
859 * We also make sure that the two vma's are compatible (adjacent,
860 * and with the same memory policies). That's all stable, even with just
861 * a read lock on the mm_sem.
862 */
863static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
864{
865 if (anon_vma_compatible(a, b)) {
866 struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
867
868 if (anon_vma && list_is_singular(&old->anon_vma_chain))
869 return anon_vma;
870 }
871 return NULL;
872}
873
874/*
828 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 875 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
829 * neighbouring vmas for a suitable anon_vma, before it goes off 876 * neighbouring vmas for a suitable anon_vma, before it goes off
830 * to allocate a new anon_vma. It checks because a repetitive 877 * to allocate a new anon_vma. It checks because a repetitive
@@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
834 */ 881 */
835struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 882struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
836{ 883{
884 struct anon_vma *anon_vma;
837 struct vm_area_struct *near; 885 struct vm_area_struct *near;
838 unsigned long vm_flags;
839 886
840 near = vma->vm_next; 887 near = vma->vm_next;
841 if (!near) 888 if (!near)
842 goto try_prev; 889 goto try_prev;
843 890
844 /* 891 anon_vma = reusable_anon_vma(near, vma, near);
845 * Since only mprotect tries to remerge vmas, match flags 892 if (anon_vma)
846 * which might be mprotected into each other later on. 893 return anon_vma;
847 * Neither mlock nor madvise tries to remerge at present,
848 * so leave their flags as obstructing a merge.
849 */
850 vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
851 vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
852
853 if (near->anon_vma && vma->vm_end == near->vm_start &&
854 mpol_equal(vma_policy(vma), vma_policy(near)) &&
855 can_vma_merge_before(near, vm_flags,
856 NULL, vma->vm_file, vma->vm_pgoff +
857 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
858 return near->anon_vma;
859try_prev: 894try_prev:
860 /* 895 /*
861 * It is potentially slow to have to call find_vma_prev here. 896 * It is potentially slow to have to call find_vma_prev here.
@@ -868,14 +903,9 @@ try_prev:
868 if (!near) 903 if (!near)
869 goto none; 904 goto none;
870 905
871 vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); 906 anon_vma = reusable_anon_vma(near, near, vma);
872 vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); 907 if (anon_vma)
873 908 return anon_vma;
874 if (near->anon_vma && near->vm_end == vma->vm_start &&
875 mpol_equal(vma_policy(near), vma_policy(vma)) &&
876 can_vma_merge_after(near, vm_flags,
877 NULL, vma->vm_file, vma->vm_pgoff))
878 return near->anon_vma;
879none: 909none:
880 /* 910 /*
881 * There's no absolute need to look only at touching neighbours: 911 * There's no absolute need to look only at touching neighbours:
diff --git a/mm/rmap.c b/mm/rmap.c
index eaa7a09eb72e..526704e8215d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -182,7 +182,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
182{ 182{
183 struct anon_vma_chain *avc, *pavc; 183 struct anon_vma_chain *avc, *pavc;
184 184
185 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { 185 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
186 avc = anon_vma_chain_alloc(); 186 avc = anon_vma_chain_alloc();
187 if (!avc) 187 if (!avc)
188 goto enomem_failure; 188 goto enomem_failure;
@@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page,
730 * @page: the page to add the mapping to 730 * @page: the page to add the mapping to
731 * @vma: the vm area in which the mapping is added 731 * @vma: the vm area in which the mapping is added
732 * @address: the user virtual address mapped 732 * @address: the user virtual address mapped
733 * @exclusive: the page is exclusively owned by the current process
733 */ 734 */
734static void __page_set_anon_rmap(struct page *page, 735static void __page_set_anon_rmap(struct page *page,
735 struct vm_area_struct *vma, unsigned long address) 736 struct vm_area_struct *vma, unsigned long address, int exclusive)
736{ 737{
737 struct anon_vma *anon_vma = vma->anon_vma; 738 struct anon_vma *anon_vma = vma->anon_vma;
738 739
739 BUG_ON(!anon_vma); 740 BUG_ON(!anon_vma);
741
742 /*
743 * If the page isn't exclusively mapped into this vma,
744 * we must use the _oldest_ possible anon_vma for the
745 * page mapping!
746 *
747 * So take the last AVC chain entry in the vma, which is
748 * the deepest ancestor, and use the anon_vma from that.
749 */
750 if (!exclusive) {
751 struct anon_vma_chain *avc;
752 avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
753 anon_vma = avc->anon_vma;
754 }
755
740 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 756 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
741 page->mapping = (struct address_space *) anon_vma; 757 page->mapping = (struct address_space *) anon_vma;
742 page->index = linear_page_index(vma, address); 758 page->index = linear_page_index(vma, address);
@@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page,
791 VM_BUG_ON(!PageLocked(page)); 807 VM_BUG_ON(!PageLocked(page));
792 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 808 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
793 if (first) 809 if (first)
794 __page_set_anon_rmap(page, vma, address); 810 __page_set_anon_rmap(page, vma, address, 0);
795 else 811 else
796 __page_check_anon_rmap(page, vma, address); 812 __page_check_anon_rmap(page, vma, address);
797} 813}
@@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page,
813 SetPageSwapBacked(page); 829 SetPageSwapBacked(page);
814 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 830 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
815 __inc_zone_page_state(page, NR_ANON_PAGES); 831 __inc_zone_page_state(page, NR_ANON_PAGES);
816 __page_set_anon_rmap(page, vma, address); 832 __page_set_anon_rmap(page, vma, address, 1);
817 if (page_evictable(page, vma)) 833 if (page_evictable(page, vma))
818 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 834 lru_cache_add_lru(page, LRU_ACTIVE_ANON);
819 else 835 else
diff --git a/net/atm/common.c b/net/atm/common.c
index 97ed94aa0cbc..b43feb1a3995 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk)
90 90
91static void vcc_def_wakeup(struct sock *sk) 91static void vcc_def_wakeup(struct sock *sk)
92{ 92{
93 read_lock(&sk->sk_callback_lock); 93 struct socket_wq *wq;
94 if (sk_has_sleeper(sk)) 94
95 wake_up(sk->sk_sleep); 95 rcu_read_lock();
96 read_unlock(&sk->sk_callback_lock); 96 wq = rcu_dereference(sk->sk_wq);
97 if (wq_has_sleeper(wq))
98 wake_up(&wq->wait);
99 rcu_read_unlock();
97} 100}
98 101
99static inline int vcc_writable(struct sock *sk) 102static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk)
106 109
107static void vcc_write_space(struct sock *sk) 110static void vcc_write_space(struct sock *sk)
108{ 111{
109 read_lock(&sk->sk_callback_lock); 112 struct socket_wq *wq;
113
114 rcu_read_lock();
110 115
111 if (vcc_writable(sk)) { 116 if (vcc_writable(sk)) {
112 if (sk_has_sleeper(sk)) 117 wq = rcu_dereference(sk->sk_wq);
113 wake_up_interruptible(sk->sk_sleep); 118 if (wq_has_sleeper(wq))
119 wake_up_interruptible(&wq->wait);
114 120
115 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 121 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
116 } 122 }
117 123
118 read_unlock(&sk->sk_callback_lock); 124 rcu_read_unlock();
119} 125}
120 126
121static struct proto vcc_proto = { 127static struct proto vcc_proto = {
@@ -549,7 +555,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
549 } 555 }
550 556
551 eff = (size+3) & ~3; /* align to word boundary */ 557 eff = (size+3) & ~3; /* align to word boundary */
552 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 558 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
553 error = 0; 559 error = 0;
554 while (!(skb = alloc_tx(vcc, eff))) { 560 while (!(skb = alloc_tx(vcc, eff))) {
555 if (m->msg_flags & MSG_DONTWAIT) { 561 if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +574,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
568 send_sig(SIGPIPE, current, 0); 574 send_sig(SIGPIPE, current, 0);
569 break; 575 break;
570 } 576 }
571 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 577 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
572 } 578 }
573 finish_wait(sk->sk_sleep, &wait); 579 finish_wait(sk_sleep(sk), &wait);
574 if (error) 580 if (error)
575 goto out; 581 goto out;
576 skb->dev = NULL; /* for paths shared with net_device interfaces */ 582 skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +601,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
595 struct atm_vcc *vcc; 601 struct atm_vcc *vcc;
596 unsigned int mask; 602 unsigned int mask;
597 603
598 sock_poll_wait(file, sk->sk_sleep, wait); 604 sock_poll_wait(file, sk_sleep(sk), wait);
599 mask = 0; 605 mask = 0;
600 606
601 vcc = ATM_SD(sock); 607 vcc = ATM_SD(sock);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6ba6e466ee54..509c8ac02b63 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
131 } 131 }
132 sk->sk_ack_backlog++; 132 sk->sk_ack_backlog++;
133 skb_queue_tail(&sk->sk_receive_queue, skb); 133 skb_queue_tail(&sk->sk_receive_queue, skb);
134 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); 134 pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
135 sk->sk_state_change(sk); 135 sk->sk_state_change(sk);
136as_indicate_complete: 136as_indicate_complete:
137 release_sock(sk); 137 release_sock(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3ba9a45a51ac..754ee4791d96 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
49 49
50 pr_debug("%p\n", vcc); 50 pr_debug("%p\n", vcc);
51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
53 sigd_enq(vcc, as_close, NULL, NULL, NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
55 schedule(); 55 schedule();
56 prepare_to_wait(sk->sk_sleep, &wait, 56 prepare_to_wait(sk_sleep(sk), &wait,
57 TASK_UNINTERRUPTIBLE); 57 TASK_UNINTERRUPTIBLE);
58 } 58 }
59 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk_sleep(sk), &wait);
60 } 60 }
61 /* beware - socket is still in use by atmsigd until the last 61 /* beware - socket is still in use by atmsigd until the last
62 as_indicate has been answered */ 62 as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
125 } 125 }
126 vcc->local = *addr; 126 vcc->local = *addr;
127 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
131 schedule(); 131 schedule();
132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
133 } 133 }
134 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk_sleep(sk), &wait);
135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
136 if (!sigd) { 136 if (!sigd) {
137 error = -EUNATCH; 137 error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
201 } 201 }
202 vcc->remote = *addr; 202 vcc->remote = *addr;
203 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
206 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
207 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk_sleep(sk), &wait);
208 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
209 error = -EINPROGRESS; 209 error = -EINPROGRESS;
210 goto out; 210 goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
214 schedule(); 214 schedule();
215 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
216 prepare_to_wait(sk->sk_sleep, &wait, 216 prepare_to_wait(sk_sleep(sk), &wait,
217 TASK_INTERRUPTIBLE); 217 TASK_INTERRUPTIBLE);
218 continue; 218 continue;
219 } 219 }
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
232 */ 232 */
233 sigd_enq(vcc, as_close, NULL, NULL, NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
235 prepare_to_wait(sk->sk_sleep, &wait, 235 prepare_to_wait(sk_sleep(sk), &wait,
236 TASK_INTERRUPTIBLE); 236 TASK_INTERRUPTIBLE);
237 schedule(); 237 schedule();
238 } 238 }
239 if (!sk->sk_err) 239 if (!sk->sk_err)
240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
241 sigd) { 241 sigd) {
242 prepare_to_wait(sk->sk_sleep, &wait, 242 prepare_to_wait(sk_sleep(sk), &wait,
243 TASK_INTERRUPTIBLE); 243 TASK_INTERRUPTIBLE);
244 schedule(); 244 schedule();
245 } 245 }
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
250 error = -EINTR; 250 error = -EINTR;
251 break; 251 break;
252 } 252 }
253 finish_wait(sk->sk_sleep, &wait); 253 finish_wait(sk_sleep(sk), &wait);
254 if (error) 254 if (error)
255 goto out; 255 goto out;
256 if (!sigd) { 256 if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
302 goto out; 302 goto out;
303 } 303 }
304 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
308 schedule(); 308 schedule();
309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
310 } 310 }
311 finish_wait(sk->sk_sleep, &wait); 311 finish_wait(sk_sleep(sk), &wait);
312 if (!sigd) { 312 if (!sigd) {
313 error = -EUNATCH; 313 error = -EUNATCH;
314 goto out; 314 goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
343 while (1) { 343 while (1) {
344 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
345 345
346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
348 sigd) { 348 sigd) {
349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
363 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
364 break; 364 break;
365 } 365 }
366 prepare_to_wait(sk->sk_sleep, &wait, 366 prepare_to_wait(sk_sleep(sk), &wait,
367 TASK_INTERRUPTIBLE); 367 TASK_INTERRUPTIBLE);
368 } 368 }
369 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk_sleep(sk), &wait);
370 if (error) 370 if (error)
371 goto out; 371 goto out;
372 if (!skb) { 372 if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
392 } 392 }
393 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
394 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, 395 prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
396 TASK_UNINTERRUPTIBLE); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); 397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
399 release_sock(sk); 399 release_sock(sk);
400 schedule(); 400 schedule();
401 lock_sock(sk); 401 lock_sock(sk);
402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, 402 prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
403 TASK_UNINTERRUPTIBLE); 403 TASK_UNINTERRUPTIBLE);
404 } 404 }
405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
406 if (!sigd) { 406 if (!sigd) {
407 error = -EUNATCH; 407 error = -EUNATCH;
408 goto out; 408 goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
438 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
439 439
440 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
443 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
445 schedule(); 445 schedule();
446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
447 } 447 }
448 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk_sleep(sk), &wait);
449 if (!sigd) 449 if (!sigd)
450 return -EUNATCH; 450 return -EUNATCH;
451 return -sk->sk_err; 451 return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
534 534
535 lock_sock(sk); 535 lock_sock(sk);
536 set_bit(ATM_VF_WAITING, &vcc->flags); 536 set_bit(ATM_VF_WAITING, &vcc->flags);
537 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 537 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
538 sigd_enq(vcc, as_addparty, NULL, NULL, 538 sigd_enq(vcc, as_addparty, NULL, NULL,
539 (struct sockaddr_atmsvc *) sockaddr); 539 (struct sockaddr_atmsvc *) sockaddr);
540 if (flags & O_NONBLOCK) { 540 if (flags & O_NONBLOCK) {
541 finish_wait(sk->sk_sleep, &wait); 541 finish_wait(sk_sleep(sk), &wait);
542 error = -EINPROGRESS; 542 error = -EINPROGRESS;
543 goto out; 543 goto out;
544 } 544 }
545 pr_debug("added wait queue\n"); 545 pr_debug("added wait queue\n");
546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
547 schedule(); 547 schedule();
548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
549 } 549 }
550 finish_wait(sk->sk_sleep, &wait); 550 finish_wait(sk_sleep(sk), &wait);
551 error = xchg(&sk->sk_err_soft, 0); 551 error = xchg(&sk->sk_err_soft, 0);
552out: 552out:
553 release_sock(sk); 553 release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
563 563
564 lock_sock(sk); 564 lock_sock(sk);
565 set_bit(ATM_VF_WAITING, &vcc->flags); 565 set_bit(ATM_VF_WAITING, &vcc->flags);
566 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 566 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
567 sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); 567 sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
568 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 568 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
569 schedule(); 569 schedule();
570 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 570 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
571 } 571 }
572 finish_wait(sk->sk_sleep, &wait); 572 finish_wait(sk_sleep(sk), &wait);
573 if (!sigd) { 573 if (!sigd) {
574 error = -EUNATCH; 574 error = -EUNATCH;
575 goto out; 575 goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 65c5801261f9..cfdfd7e2a172 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
1281 DEFINE_WAIT(wait); 1281 DEFINE_WAIT(wait);
1282 1282
1283 for (;;) { 1283 for (;;) {
1284 prepare_to_wait(sk->sk_sleep, &wait, 1284 prepare_to_wait(sk_sleep(sk), &wait,
1285 TASK_INTERRUPTIBLE); 1285 TASK_INTERRUPTIBLE);
1286 if (sk->sk_state != TCP_SYN_SENT) 1286 if (sk->sk_state != TCP_SYN_SENT)
1287 break; 1287 break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
1294 err = -ERESTARTSYS; 1294 err = -ERESTARTSYS;
1295 break; 1295 break;
1296 } 1296 }
1297 finish_wait(sk->sk_sleep, &wait); 1297 finish_wait(sk_sleep(sk), &wait);
1298 1298
1299 if (err) 1299 if (err)
1300 goto out_release; 1300 goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
1346 * hooked into the SABM we saved 1346 * hooked into the SABM we saved
1347 */ 1347 */
1348 for (;;) { 1348 for (;;) {
1349 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1349 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1350 skb = skb_dequeue(&sk->sk_receive_queue); 1350 skb = skb_dequeue(&sk->sk_receive_queue);
1351 if (skb) 1351 if (skb)
1352 break; 1352 break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
1364 err = -ERESTARTSYS; 1364 err = -ERESTARTSYS;
1365 break; 1365 break;
1366 } 1366 }
1367 finish_wait(sk->sk_sleep, &wait); 1367 finish_wait(sk_sleep(sk), &wait);
1368 1368
1369 if (err) 1369 if (err)
1370 goto out; 1370 goto out;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 404a8500fd03..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
288 288
289 BT_DBG("sock %p, sk %p", sock, sk); 289 BT_DBG("sock %p, sk %p", sock, sk);
290 290
291 poll_wait(file, sk->sk_sleep, wait); 291 poll_wait(file, sk_sleep(sk), wait);
292 292
293 if (sk->sk_state == BT_LISTEN) 293 if (sk->sk_state == BT_LISTEN)
294 return bt_accept_poll(sk); 294 return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
378 378
379 BT_DBG("sk %p", sk); 379 BT_DBG("sk %p", sk);
380 380
381 add_wait_queue(sk->sk_sleep, &wait); 381 add_wait_queue(sk_sleep(sk), &wait);
382 while (sk->sk_state != state) { 382 while (sk->sk_state != state) {
383 set_current_state(TASK_INTERRUPTIBLE); 383 set_current_state(TASK_INTERRUPTIBLE);
384 384
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
401 break; 401 break;
402 } 402 }
403 set_current_state(TASK_RUNNING); 403 set_current_state(TASK_RUNNING);
404 remove_wait_queue(sk->sk_sleep, &wait); 404 remove_wait_queue(sk_sleep(sk), &wait);
405 return err; 405 return err;
406} 406}
407EXPORT_SYMBOL(bt_sock_wait_state); 407EXPORT_SYMBOL(bt_sock_wait_state);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 8062dad6d10d..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
474 set_user_nice(current, -15); 474 set_user_nice(current, -15);
475 475
476 init_waitqueue_entry(&wait, current); 476 init_waitqueue_entry(&wait, current);
477 add_wait_queue(sk->sk_sleep, &wait); 477 add_wait_queue(sk_sleep(sk), &wait);
478 while (!atomic_read(&s->killed)) { 478 while (!atomic_read(&s->killed)) {
479 set_current_state(TASK_INTERRUPTIBLE); 479 set_current_state(TASK_INTERRUPTIBLE);
480 480
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
496 schedule(); 496 schedule();
497 } 497 }
498 set_current_state(TASK_RUNNING); 498 set_current_state(TASK_RUNNING);
499 remove_wait_queue(sk->sk_sleep, &wait); 499 remove_wait_queue(sk_sleep(sk), &wait);
500 500
501 /* Cleanup session */ 501 /* Cleanup session */
502 down_write(&bnep_session_sem); 502 down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
507 /* Wakeup user-space polling for socket errors */ 507 /* Wakeup user-space polling for socket errors */
508 s->sock->sk->sk_err = EUNATCH; 508 s->sock->sk->sk_err = EUNATCH;
509 509
510 wake_up_interruptible(s->sock->sk->sk_sleep); 510 wake_up_interruptible(sk_sleep(s->sock->sk));
511 511
512 /* Release the socket */ 512 /* Release the socket */
513 fput(s->sock->file); 513 fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
638 638
639 /* Kill session thread */ 639 /* Kill session thread */
640 atomic_inc(&s->killed); 640 atomic_inc(&s->killed);
641 wake_up_interruptible(s->sock->sk->sk_sleep); 641 wake_up_interruptible(sk_sleep(s->sock->sk));
642 } else 642 } else
643 err = -ENOENT; 643 err = -ENOENT;
644 644
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d48b33f4d4ba..0faad5ce6dc4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -109,7 +109,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
109 } 109 }
110 110
111 skb_queue_tail(&sk->sk_write_queue, skb); 111 skb_queue_tail(&sk->sk_write_queue, skb);
112 wake_up_interruptible(sk->sk_sleep); 112 wake_up_interruptible(sk_sleep(sk));
113#endif 113#endif
114} 114}
115 115
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
193 /* 193 /*
194 * We cannot send L2CAP packets from here as we are potentially in a bh. 194 * We cannot send L2CAP packets from here as we are potentially in a bh.
195 * So we have to queue them and wake up session thread which is sleeping 195 * So we have to queue them and wake up session thread which is sleeping
196 * on the sk->sk_sleep. 196 * on the sk_sleep(sk).
197 */ 197 */
198 dev->trans_start = jiffies; 198 dev->trans_start = jiffies;
199 skb_queue_tail(&sk->sk_write_queue, skb); 199 skb_queue_tail(&sk->sk_write_queue, skb);
200 wake_up_interruptible(sk->sk_sleep); 200 wake_up_interruptible(sk_sleep(sk));
201 201
202 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { 202 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
203 BT_DBG("tx queue is full"); 203 BT_DBG("tx queue is full");
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
125{ 125{
126 struct sock *sk = session->sock->sk; 126 struct sock *sk = session->sock->sk;
127 127
128 wake_up_interruptible(sk->sk_sleep); 128 wake_up_interruptible(sk_sleep(sk));
129} 129}
130 130
131/* CMTP init defines */ 131/* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
284 set_user_nice(current, -15); 284 set_user_nice(current, -15);
285 285
286 init_waitqueue_entry(&wait, current); 286 init_waitqueue_entry(&wait, current);
287 add_wait_queue(sk->sk_sleep, &wait); 287 add_wait_queue(sk_sleep(sk), &wait);
288 while (!atomic_read(&session->terminate)) { 288 while (!atomic_read(&session->terminate)) {
289 set_current_state(TASK_INTERRUPTIBLE); 289 set_current_state(TASK_INTERRUPTIBLE);
290 290
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
301 schedule(); 301 schedule();
302 } 302 }
303 set_current_state(TASK_RUNNING); 303 set_current_state(TASK_RUNNING);
304 remove_wait_queue(sk->sk_sleep, &wait); 304 remove_wait_queue(sk_sleep(sk), &wait);
305 305
306 down_write(&cmtp_session_sem); 306 down_write(&cmtp_session_sem);
307 307
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 280529ad9274..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
561 561
562 init_waitqueue_entry(&ctrl_wait, current); 562 init_waitqueue_entry(&ctrl_wait, current);
563 init_waitqueue_entry(&intr_wait, current); 563 init_waitqueue_entry(&intr_wait, current);
564 add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 564 add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
565 add_wait_queue(intr_sk->sk_sleep, &intr_wait); 565 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
566 while (!atomic_read(&session->terminate)) { 566 while (!atomic_read(&session->terminate)) {
567 set_current_state(TASK_INTERRUPTIBLE); 567 set_current_state(TASK_INTERRUPTIBLE);
568 568
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
584 schedule(); 584 schedule();
585 } 585 }
586 set_current_state(TASK_RUNNING); 586 set_current_state(TASK_RUNNING);
587 remove_wait_queue(intr_sk->sk_sleep, &intr_wait); 587 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
588 remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 588 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
589 589
590 down_write(&hidp_session_sem); 590 down_write(&hidp_session_sem);
591 591
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
609 609
610 fput(session->intr_sock->file); 610 fput(session->intr_sock->file);
611 611
612 wait_event_timeout(*(ctrl_sk->sk_sleep), 612 wait_event_timeout(*(sk_sleep(ctrl_sk)),
613 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); 613 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
614 614
615 fput(session->ctrl_sock->file); 615 fput(session->ctrl_sock->file);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index a4e215d50c10..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
164 struct sock *ctrl_sk = session->ctrl_sock->sk; 164 struct sock *ctrl_sk = session->ctrl_sock->sk;
165 struct sock *intr_sk = session->intr_sock->sk; 165 struct sock *intr_sk = session->intr_sock->sk;
166 166
167 wake_up_interruptible(ctrl_sk->sk_sleep); 167 wake_up_interruptible(sk_sleep(ctrl_sk));
168 wake_up_interruptible(intr_sk->sk_sleep); 168 wake_up_interruptible(sk_sleep(intr_sk));
169} 169}
170 170
171/* HIDP init defines */ 171/* HIDP init defines */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 99d68c34e4f1..864c76f4a678 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1147,7 +1147,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1147 BT_DBG("sk %p timeo %ld", sk, timeo); 1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1148 1148
1149 /* Wait for an incoming connection. (wake-one). */ 1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk->sk_sleep, &wait); 1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE); 1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) { 1153 if (!timeo) {
@@ -1170,7 +1170,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1170 } 1170 }
1171 } 1171 }
1172 set_current_state(TASK_RUNNING); 1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk->sk_sleep, &wait); 1173 remove_wait_queue(sk_sleep(sk), &wait);
1174 1174
1175 if (err) 1175 if (err)
1176 goto done; 1176 goto done;
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1626 /* Connectionless channel */ 1626 /* Connectionless channel */
1627 if (sk->sk_type == SOCK_DGRAM) { 1627 if (sk->sk_type == SOCK_DGRAM) {
1628 skb = l2cap_create_connless_pdu(sk, msg, len); 1628 skb = l2cap_create_connless_pdu(sk, msg, len);
1629 err = l2cap_do_send(sk, skb); 1629 if (IS_ERR(skb))
1630 err = PTR_ERR(skb);
1631 else
1632 err = l2cap_do_send(sk, skb);
1630 goto done; 1633 goto done;
1631 } 1634 }
1632 1635
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8ed3c37684fa..43fbf6b4b4bf 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
503 BT_DBG("sk %p timeo %ld", sk, timeo); 503 BT_DBG("sk %p timeo %ld", sk, timeo);
504 504
505 /* Wait for an incoming connection. (wake-one). */ 505 /* Wait for an incoming connection. (wake-one). */
506 add_wait_queue_exclusive(sk->sk_sleep, &wait); 506 add_wait_queue_exclusive(sk_sleep(sk), &wait);
507 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 507 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
508 set_current_state(TASK_INTERRUPTIBLE); 508 set_current_state(TASK_INTERRUPTIBLE);
509 if (!timeo) { 509 if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
526 } 526 }
527 } 527 }
528 set_current_state(TASK_RUNNING); 528 set_current_state(TASK_RUNNING);
529 remove_wait_queue(sk->sk_sleep, &wait); 529 remove_wait_queue(sk_sleep(sk), &wait);
530 530
531 if (err) 531 if (err)
532 goto done; 532 goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
621{ 621{
622 DECLARE_WAITQUEUE(wait, current); 622 DECLARE_WAITQUEUE(wait, current);
623 623
624 add_wait_queue(sk->sk_sleep, &wait); 624 add_wait_queue(sk_sleep(sk), &wait);
625 for (;;) { 625 for (;;) {
626 set_current_state(TASK_INTERRUPTIBLE); 626 set_current_state(TASK_INTERRUPTIBLE);
627 627
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
640 } 640 }
641 641
642 __set_current_state(TASK_RUNNING); 642 __set_current_state(TASK_RUNNING);
643 remove_wait_queue(sk->sk_sleep, &wait); 643 remove_wait_queue(sk_sleep(sk), &wait);
644 return timeo; 644 return timeo;
645} 645}
646 646
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ca6b2ad1c3fc..b406d3eff53a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -567,7 +567,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
567 BT_DBG("sk %p timeo %ld", sk, timeo); 567 BT_DBG("sk %p timeo %ld", sk, timeo);
568 568
569 /* Wait for an incoming connection. (wake-one). */ 569 /* Wait for an incoming connection. (wake-one). */
570 add_wait_queue_exclusive(sk->sk_sleep, &wait); 570 add_wait_queue_exclusive(sk_sleep(sk), &wait);
571 while (!(ch = bt_accept_dequeue(sk, newsock))) { 571 while (!(ch = bt_accept_dequeue(sk, newsock))) {
572 set_current_state(TASK_INTERRUPTIBLE); 572 set_current_state(TASK_INTERRUPTIBLE);
573 if (!timeo) { 573 if (!timeo) {
@@ -590,7 +590,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
590 } 590 }
591 } 591 }
592 set_current_state(TASK_RUNNING); 592 set_current_state(TASK_RUNNING);
593 remove_wait_queue(sk->sk_sleep, &wait); 593 remove_wait_queue(sk_sleep(sk), &wait);
594 594
595 if (err) 595 if (err)
596 goto done; 596 goto done;
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index d115d5cea5b6..9190ae462cb4 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,14 +33,14 @@ config BRIDGE
33 If unsure, say N. 33 If unsure, say N.
34 34
35config BRIDGE_IGMP_SNOOPING 35config BRIDGE_IGMP_SNOOPING
36 bool "IGMP snooping" 36 bool "IGMP/MLD snooping"
37 depends on BRIDGE 37 depends on BRIDGE
38 depends on INET 38 depends on INET
39 default y 39 default y
40 ---help--- 40 ---help---
41 If you say Y here, then the Ethernet bridge will be able selectively 41 If you say Y here, then the Ethernet bridge will be able selectively
42 forward multicast traffic based on IGMP traffic received from each 42 forward multicast traffic based on IGMP/MLD traffic received from
43 port. 43 each port.
44 44
45 Say N to exclude this support and reduce the binary size. 45 Say N to exclude this support and reduce the binary size.
46 46
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 007bde87415d..f15f9c4a0dd2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -13,9 +13,12 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/netpoll.h>
16#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/list.h>
18#include <linux/netfilter_bridge.h> 20#include <linux/netfilter_bridge.h>
21
19#include <asm/uaccess.h> 22#include <asm/uaccess.h>
20#include "br_private.h" 23#include "br_private.h"
21 24
@@ -43,7 +46,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
43 skb_reset_mac_header(skb); 46 skb_reset_mac_header(skb);
44 skb_pull(skb, ETH_HLEN); 47 skb_pull(skb, ETH_HLEN);
45 48
46 if (dest[0] & 1) { 49 if (is_multicast_ether_addr(dest)) {
47 if (br_multicast_rcv(br, NULL, skb)) 50 if (br_multicast_rcv(br, NULL, skb))
48 goto out; 51 goto out;
49 52
@@ -195,6 +198,59 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
195 return 0; 198 return 0;
196} 199}
197 200
201#ifdef CONFIG_NET_POLL_CONTROLLER
202bool br_devices_support_netpoll(struct net_bridge *br)
203{
204 struct net_bridge_port *p;
205 bool ret = true;
206 int count = 0;
207 unsigned long flags;
208
209 spin_lock_irqsave(&br->lock, flags);
210 list_for_each_entry(p, &br->port_list, list) {
211 count++;
212 if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
213 !p->dev->netdev_ops->ndo_poll_controller)
214 ret = false;
215 }
216 spin_unlock_irqrestore(&br->lock, flags);
217 return count != 0 && ret;
218}
219
220static void br_poll_controller(struct net_device *br_dev)
221{
222 struct netpoll *np = br_dev->npinfo->netpoll;
223
224 if (np->real_dev != br_dev)
225 netpoll_poll_dev(np->real_dev);
226}
227
228void br_netpoll_cleanup(struct net_device *br_dev)
229{
230 struct net_bridge *br = netdev_priv(br_dev);
231 struct net_bridge_port *p, *n;
232 const struct net_device_ops *ops;
233
234 br->dev->npinfo = NULL;
235 list_for_each_entry_safe(p, n, &br->port_list, list) {
236 if (p->dev) {
237 ops = p->dev->netdev_ops;
238 if (ops->ndo_netpoll_cleanup)
239 ops->ndo_netpoll_cleanup(p->dev);
240 else
241 p->dev->npinfo = NULL;
242 }
243 }
244}
245
246#else
247
248void br_netpoll_cleanup(struct net_device *br_dev)
249{
250}
251
252#endif
253
198static const struct ethtool_ops br_ethtool_ops = { 254static const struct ethtool_ops br_ethtool_ops = {
199 .get_drvinfo = br_getinfo, 255 .get_drvinfo = br_getinfo,
200 .get_link = ethtool_op_get_link, 256 .get_link = ethtool_op_get_link,
@@ -218,6 +274,10 @@ static const struct net_device_ops br_netdev_ops = {
218 .ndo_set_multicast_list = br_dev_set_multicast_list, 274 .ndo_set_multicast_list = br_dev_set_multicast_list,
219 .ndo_change_mtu = br_change_mtu, 275 .ndo_change_mtu = br_change_mtu,
220 .ndo_do_ioctl = br_dev_ioctl, 276 .ndo_do_ioctl = br_dev_ioctl,
277#ifdef CONFIG_NET_POLL_CONTROLLER
278 .ndo_netpoll_cleanup = br_netpoll_cleanup,
279 .ndo_poll_controller = br_poll_controller,
280#endif
221}; 281};
222 282
223static void br_dev_free(struct net_device *dev) 283static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 92fb3293a215..a98ef1393097 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -15,6 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/netpoll.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
20#include <linux/netfilter_bridge.h> 21#include <linux/netfilter_bridge.h>
@@ -50,7 +51,13 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
50 else { 51 else {
51 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
52 53
53 dev_queue_xmit(skb); 54#ifdef CONFIG_NET_POLL_CONTROLLER
55 if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
56 netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
57 skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
58 } else
59#endif
60 dev_queue_xmit(skb);
54 } 61 }
55 } 62 }
56 63
@@ -66,9 +73,23 @@ int br_forward_finish(struct sk_buff *skb)
66 73
67static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) 74static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
68{ 75{
76#ifdef CONFIG_NET_POLL_CONTROLLER
77 struct net_bridge *br = to->br;
78 if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
79 struct netpoll *np;
80 to->dev->npinfo = skb->dev->npinfo;
81 np = skb->dev->npinfo->netpoll;
82 np->real_dev = np->dev = to->dev;
83 to->dev->priv_flags |= IFF_IN_NETPOLL;
84 }
85#endif
69 skb->dev = to->dev; 86 skb->dev = to->dev;
70 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 87 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
71 br_forward_finish); 88 br_forward_finish);
89#ifdef CONFIG_NET_POLL_CONTROLLER
90 if (skb->dev->npinfo)
91 skb->dev->npinfo->netpoll->dev = br->dev;
92#endif
72} 93}
73 94
74static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 95static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -208,17 +229,15 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
208{ 229{
209 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; 230 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
210 struct net_bridge *br = netdev_priv(dev); 231 struct net_bridge *br = netdev_priv(dev);
211 struct net_bridge_port *port; 232 struct net_bridge_port *prev = NULL;
212 struct net_bridge_port *lport, *rport;
213 struct net_bridge_port *prev;
214 struct net_bridge_port_group *p; 233 struct net_bridge_port_group *p;
215 struct hlist_node *rp; 234 struct hlist_node *rp;
216 235
217 prev = NULL; 236 rp = rcu_dereference(br->router_list.first);
218 237 p = mdst ? rcu_dereference(mdst->ports) : NULL;
219 rp = br->router_list.first;
220 p = mdst ? mdst->ports : NULL;
221 while (p || rp) { 238 while (p || rp) {
239 struct net_bridge_port *port, *lport, *rport;
240
222 lport = p ? p->port : NULL; 241 lport = p ? p->port : NULL;
223 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : 242 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
224 NULL; 243 NULL;
@@ -231,9 +250,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
231 goto out; 250 goto out;
232 251
233 if ((unsigned long)lport >= (unsigned long)port) 252 if ((unsigned long)lport >= (unsigned long)port)
234 p = p->next; 253 p = rcu_dereference(p->next);
235 if ((unsigned long)rport >= (unsigned long)port) 254 if ((unsigned long)rport >= (unsigned long)port)
236 rp = rp->next; 255 rp = rcu_dereference(rp->next);
237 } 256 }
238 257
239 if (!prev) 258 if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 521439333316..537bdd60d9b9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/netpoll.h>
16#include <linux/ethtool.h> 17#include <linux/ethtool.h>
17#include <linux/if_arp.h> 18#include <linux/if_arp.h>
18#include <linux/module.h> 19#include <linux/module.h>
@@ -153,6 +154,14 @@ static void del_nbp(struct net_bridge_port *p)
153 kobject_uevent(&p->kobj, KOBJ_REMOVE); 154 kobject_uevent(&p->kobj, KOBJ_REMOVE);
154 kobject_del(&p->kobj); 155 kobject_del(&p->kobj);
155 156
157#ifdef CONFIG_NET_POLL_CONTROLLER
158 if (br_devices_support_netpoll(br))
159 br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
160 if (dev->netdev_ops->ndo_netpoll_cleanup)
161 dev->netdev_ops->ndo_netpoll_cleanup(dev);
162 else
163 dev->npinfo = NULL;
164#endif
156 call_rcu(&p->rcu, destroy_nbp_rcu); 165 call_rcu(&p->rcu, destroy_nbp_rcu);
157} 166}
158 167
@@ -165,6 +174,8 @@ static void del_br(struct net_bridge *br, struct list_head *head)
165 del_nbp(p); 174 del_nbp(p);
166 } 175 }
167 176
177 br_netpoll_cleanup(br->dev);
178
168 del_timer_sync(&br->gc_timer); 179 del_timer_sync(&br->gc_timer);
169 180
170 br_sysfs_delbr(br->dev); 181 br_sysfs_delbr(br->dev);
@@ -444,6 +455,20 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
444 455
445 kobject_uevent(&p->kobj, KOBJ_ADD); 456 kobject_uevent(&p->kobj, KOBJ_ADD);
446 457
458#ifdef CONFIG_NET_POLL_CONTROLLER
459 if (br_devices_support_netpoll(br)) {
460 br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
461 if (br->dev->npinfo)
462 dev->npinfo = br->dev->npinfo;
463 } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
464 br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
465 printk(KERN_INFO "New device %s does not support netpoll\n",
466 dev->name);
467 printk(KERN_INFO "Disabling netpoll for %s\n",
468 br->dev->name);
469 }
470#endif
471
447 return 0; 472 return 0;
448err2: 473err2:
449 br_fdb_delete_by_port(br, p, 1); 474 br_fdb_delete_by_port(br, p, 1);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 8ccdb8ee3928..c8419e240316 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,51 +24,139 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <net/ip.h> 26#include <net/ip.h>
27#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28#include <net/ipv6.h>
29#include <net/mld.h>
30#include <net/addrconf.h>
31#include <net/ip6_checksum.h>
32#endif
27 33
28#include "br_private.h" 34#include "br_private.h"
29 35
30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 36#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
37static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
31{ 38{
32 return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1); 39 if (ipv6_addr_is_multicast(addr) &&
40 IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
41 return 1;
42 return 0;
43}
44#endif
45
46static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
47{
48 if (a->proto != b->proto)
49 return 0;
50 switch (a->proto) {
51 case htons(ETH_P_IP):
52 return a->u.ip4 == b->u.ip4;
53#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
54 case htons(ETH_P_IPV6):
55 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
56#endif
57 }
58 return 0;
59}
60
61static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
62{
63 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
64}
65
66#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
68 const struct in6_addr *ip)
69{
70 return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
71}
72#endif
73
74static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
75 struct br_ip *ip)
76{
77 switch (ip->proto) {
78 case htons(ETH_P_IP):
79 return __br_ip4_hash(mdb, ip->u.ip4);
80#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
81 case htons(ETH_P_IPV6):
82 return __br_ip6_hash(mdb, &ip->u.ip6);
83#endif
84 }
85 return 0;
33} 86}
34 87
35static struct net_bridge_mdb_entry *__br_mdb_ip_get( 88static struct net_bridge_mdb_entry *__br_mdb_ip_get(
36 struct net_bridge_mdb_htable *mdb, __be32 dst, int hash) 89 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
37{ 90{
38 struct net_bridge_mdb_entry *mp; 91 struct net_bridge_mdb_entry *mp;
39 struct hlist_node *p; 92 struct hlist_node *p;
40 93
41 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 94 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
42 if (dst == mp->addr) 95 if (br_ip_equal(&mp->addr, dst))
43 return mp; 96 return mp;
44 } 97 }
45 98
46 return NULL; 99 return NULL;
47} 100}
48 101
49static struct net_bridge_mdb_entry *br_mdb_ip_get( 102static struct net_bridge_mdb_entry *br_mdb_ip4_get(
50 struct net_bridge_mdb_htable *mdb, __be32 dst) 103 struct net_bridge_mdb_htable *mdb, __be32 dst)
51{ 104{
52 if (!mdb) 105 struct br_ip br_dst;
53 return NULL; 106
107 br_dst.u.ip4 = dst;
108 br_dst.proto = htons(ETH_P_IP);
109
110 return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
111}
112
113#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
114static struct net_bridge_mdb_entry *br_mdb_ip6_get(
115 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
116{
117 struct br_ip br_dst;
54 118
119 ipv6_addr_copy(&br_dst.u.ip6, dst);
120 br_dst.proto = htons(ETH_P_IPV6);
121
122 return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
123}
124#endif
125
126static struct net_bridge_mdb_entry *br_mdb_ip_get(
127 struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
128{
55 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 129 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
56} 130}
57 131
58struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 132struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
59 struct sk_buff *skb) 133 struct sk_buff *skb)
60{ 134{
61 if (br->multicast_disabled) 135 struct net_bridge_mdb_htable *mdb = br->mdb;
136 struct br_ip ip;
137
138 if (!mdb || br->multicast_disabled)
139 return NULL;
140
141 if (BR_INPUT_SKB_CB(skb)->igmp)
62 return NULL; 142 return NULL;
63 143
144 ip.proto = skb->protocol;
145
64 switch (skb->protocol) { 146 switch (skb->protocol) {
65 case htons(ETH_P_IP): 147 case htons(ETH_P_IP):
66 if (BR_INPUT_SKB_CB(skb)->igmp) 148 ip.u.ip4 = ip_hdr(skb)->daddr;
67 break; 149 break;
68 return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr); 150#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
151 case htons(ETH_P_IPV6):
152 ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
153 break;
154#endif
155 default:
156 return NULL;
69 } 157 }
70 158
71 return NULL; 159 return br_mdb_ip_get(mdb, &ip);
72} 160}
73 161
74static void br_mdb_free(struct rcu_head *head) 162static void br_mdb_free(struct rcu_head *head)
@@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
95 for (i = 0; i < old->max; i++) 183 for (i = 0; i < old->max; i++)
96 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) 184 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
97 hlist_add_head(&mp->hlist[new->ver], 185 hlist_add_head(&mp->hlist[new->ver],
98 &new->mhash[br_ip_hash(new, mp->addr)]); 186 &new->mhash[br_ip_hash(new, &mp->addr)]);
99 187
100 if (!elasticity) 188 if (!elasticity)
101 return 0; 189 return 0;
@@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
163 struct net_bridge_port_group *p; 251 struct net_bridge_port_group *p;
164 struct net_bridge_port_group **pp; 252 struct net_bridge_port_group **pp;
165 253
166 mp = br_mdb_ip_get(mdb, pg->addr); 254 mp = br_mdb_ip_get(mdb, &pg->addr);
167 if (WARN_ON(!mp)) 255 if (WARN_ON(!mp))
168 return; 256 return;
169 257
@@ -171,7 +259,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
171 if (p != pg) 259 if (p != pg)
172 continue; 260 continue;
173 261
174 *pp = p->next; 262 rcu_assign_pointer(*pp, p->next);
175 hlist_del_init(&p->mglist); 263 hlist_del_init(&p->mglist);
176 del_timer(&p->timer); 264 del_timer(&p->timer);
177 del_timer(&p->query_timer); 265 del_timer(&p->query_timer);
@@ -249,8 +337,8 @@ out:
249 return 0; 337 return 0;
250} 338}
251 339
252static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 340static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
253 __be32 group) 341 __be32 group)
254{ 342{
255 struct sk_buff *skb; 343 struct sk_buff *skb;
256 struct igmphdr *ih; 344 struct igmphdr *ih;
@@ -314,12 +402,104 @@ out:
314 return skb; 402 return skb;
315} 403}
316 404
405#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
406static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
407 struct in6_addr *group)
408{
409 struct sk_buff *skb;
410 struct ipv6hdr *ip6h;
411 struct mld_msg *mldq;
412 struct ethhdr *eth;
413 u8 *hopopt;
414 unsigned long interval;
415
416 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
417 8 + sizeof(*mldq));
418 if (!skb)
419 goto out;
420
421 skb->protocol = htons(ETH_P_IPV6);
422
423 /* Ethernet header */
424 skb_reset_mac_header(skb);
425 eth = eth_hdr(skb);
426
427 memcpy(eth->h_source, br->dev->dev_addr, 6);
428 ipv6_eth_mc_map(group, eth->h_dest);
429 eth->h_proto = htons(ETH_P_IPV6);
430 skb_put(skb, sizeof(*eth));
431
432 /* IPv6 header + HbH option */
433 skb_set_network_header(skb, skb->len);
434 ip6h = ipv6_hdr(skb);
435
436 *(__force __be32 *)ip6h = htonl(0x60000000);
437 ip6h->payload_len = 8 + sizeof(*mldq);
438 ip6h->nexthdr = IPPROTO_HOPOPTS;
439 ip6h->hop_limit = 1;
440 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
441 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
442
443 hopopt = (u8 *)(ip6h + 1);
444 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
445 hopopt[1] = 0; /* length of HbH */
446 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
447 hopopt[3] = 2; /* Length of RA Option */
448 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
449 hopopt[5] = 0;
450 hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */
451 hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */
452
453 skb_put(skb, sizeof(*ip6h) + 8);
454
455 /* ICMPv6 */
456 skb_set_transport_header(skb, skb->len);
457 mldq = (struct mld_msg *) icmp6_hdr(skb);
458
459 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
460 br->multicast_query_response_interval;
461
462 mldq->mld_type = ICMPV6_MGM_QUERY;
463 mldq->mld_code = 0;
464 mldq->mld_cksum = 0;
465 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
466 mldq->mld_reserved = 0;
467 ipv6_addr_copy(&mldq->mld_mca, group);
468
469 /* checksum */
470 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
471 sizeof(*mldq), IPPROTO_ICMPV6,
472 csum_partial(mldq,
473 sizeof(*mldq), 0));
474 skb_put(skb, sizeof(*mldq));
475
476 __skb_pull(skb, sizeof(*eth));
477
478out:
479 return skb;
480}
481#endif
482
483static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
484 struct br_ip *addr)
485{
486 switch (addr->proto) {
487 case htons(ETH_P_IP):
488 return br_ip4_multicast_alloc_query(br, addr->u.ip4);
489#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
490 case htons(ETH_P_IPV6):
491 return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
492#endif
493 }
494 return NULL;
495}
496
317static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) 497static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
318{ 498{
319 struct net_bridge *br = mp->br; 499 struct net_bridge *br = mp->br;
320 struct sk_buff *skb; 500 struct sk_buff *skb;
321 501
322 skb = br_multicast_alloc_query(br, mp->addr); 502 skb = br_multicast_alloc_query(br, &mp->addr);
323 if (!skb) 503 if (!skb)
324 goto timer; 504 goto timer;
325 505
@@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
353 struct net_bridge *br = port->br; 533 struct net_bridge *br = port->br;
354 struct sk_buff *skb; 534 struct sk_buff *skb;
355 535
356 skb = br_multicast_alloc_query(br, pg->addr); 536 skb = br_multicast_alloc_query(br, &pg->addr);
357 if (!skb) 537 if (!skb)
358 goto timer; 538 goto timer;
359 539
@@ -383,8 +563,8 @@ out:
383} 563}
384 564
385static struct net_bridge_mdb_entry *br_multicast_get_group( 565static struct net_bridge_mdb_entry *br_multicast_get_group(
386 struct net_bridge *br, struct net_bridge_port *port, __be32 group, 566 struct net_bridge *br, struct net_bridge_port *port,
387 int hash) 567 struct br_ip *group, int hash)
388{ 568{
389 struct net_bridge_mdb_htable *mdb = br->mdb; 569 struct net_bridge_mdb_htable *mdb = br->mdb;
390 struct net_bridge_mdb_entry *mp; 570 struct net_bridge_mdb_entry *mp;
@@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
396 576
397 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 577 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
398 count++; 578 count++;
399 if (unlikely(group == mp->addr)) { 579 if (unlikely(br_ip_equal(group, &mp->addr)))
400 return mp; 580 return mp;
401 }
402 } 581 }
403 582
404 elasticity = 0; 583 elasticity = 0;
@@ -463,7 +642,8 @@ err:
463} 642}
464 643
465static struct net_bridge_mdb_entry *br_multicast_new_group( 644static struct net_bridge_mdb_entry *br_multicast_new_group(
466 struct net_bridge *br, struct net_bridge_port *port, __be32 group) 645 struct net_bridge *br, struct net_bridge_port *port,
646 struct br_ip *group)
467{ 647{
468 struct net_bridge_mdb_htable *mdb = br->mdb; 648 struct net_bridge_mdb_htable *mdb = br->mdb;
469 struct net_bridge_mdb_entry *mp; 649 struct net_bridge_mdb_entry *mp;
@@ -496,7 +676,7 @@ rehash:
496 goto out; 676 goto out;
497 677
498 mp->br = br; 678 mp->br = br;
499 mp->addr = group; 679 mp->addr = *group;
500 setup_timer(&mp->timer, br_multicast_group_expired, 680 setup_timer(&mp->timer, br_multicast_group_expired,
501 (unsigned long)mp); 681 (unsigned long)mp);
502 setup_timer(&mp->query_timer, br_multicast_group_query_expired, 682 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
@@ -510,7 +690,8 @@ out:
510} 690}
511 691
512static int br_multicast_add_group(struct net_bridge *br, 692static int br_multicast_add_group(struct net_bridge *br,
513 struct net_bridge_port *port, __be32 group) 693 struct net_bridge_port *port,
694 struct br_ip *group)
514{ 695{
515 struct net_bridge_mdb_entry *mp; 696 struct net_bridge_mdb_entry *mp;
516 struct net_bridge_port_group *p; 697 struct net_bridge_port_group *p;
@@ -518,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br,
518 unsigned long now = jiffies; 699 unsigned long now = jiffies;
519 int err; 700 int err;
520 701
521 if (ipv4_is_local_multicast(group))
522 return 0;
523
524 spin_lock(&br->multicast_lock); 702 spin_lock(&br->multicast_lock);
525 if (!netif_running(br->dev) || 703 if (!netif_running(br->dev) ||
526 (port && port->state == BR_STATE_DISABLED)) 704 (port && port->state == BR_STATE_DISABLED))
@@ -549,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br,
549 if (unlikely(!p)) 727 if (unlikely(!p))
550 goto err; 728 goto err;
551 729
552 p->addr = group; 730 p->addr = *group;
553 p->port = port; 731 p->port = port;
554 p->next = *pp; 732 p->next = *pp;
555 hlist_add_head(&p->mglist, &port->mglist); 733 hlist_add_head(&p->mglist, &port->mglist);
@@ -570,6 +748,38 @@ err:
570 return err; 748 return err;
571} 749}
572 750
751static int br_ip4_multicast_add_group(struct net_bridge *br,
752 struct net_bridge_port *port,
753 __be32 group)
754{
755 struct br_ip br_group;
756
757 if (ipv4_is_local_multicast(group))
758 return 0;
759
760 br_group.u.ip4 = group;
761 br_group.proto = htons(ETH_P_IP);
762
763 return br_multicast_add_group(br, port, &br_group);
764}
765
766#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
767static int br_ip6_multicast_add_group(struct net_bridge *br,
768 struct net_bridge_port *port,
769 const struct in6_addr *group)
770{
771 struct br_ip br_group;
772
773 if (ipv6_is_local_multicast(group))
774 return 0;
775
776 ipv6_addr_copy(&br_group.u.ip6, group);
777 br_group.proto = htons(ETH_P_IP);
778
779 return br_multicast_add_group(br, port, &br_group);
780}
781#endif
782
573static void br_multicast_router_expired(unsigned long data) 783static void br_multicast_router_expired(unsigned long data)
574{ 784{
575 struct net_bridge_port *port = (void *)data; 785 struct net_bridge_port *port = (void *)data;
@@ -591,19 +801,15 @@ static void br_multicast_local_router_expired(unsigned long data)
591{ 801{
592} 802}
593 803
594static void br_multicast_send_query(struct net_bridge *br, 804static void __br_multicast_send_query(struct net_bridge *br,
595 struct net_bridge_port *port, u32 sent) 805 struct net_bridge_port *port,
806 struct br_ip *ip)
596{ 807{
597 unsigned long time;
598 struct sk_buff *skb; 808 struct sk_buff *skb;
599 809
600 if (!netif_running(br->dev) || br->multicast_disabled || 810 skb = br_multicast_alloc_query(br, ip);
601 timer_pending(&br->multicast_querier_timer))
602 return;
603
604 skb = br_multicast_alloc_query(br, 0);
605 if (!skb) 811 if (!skb)
606 goto timer; 812 return;
607 813
608 if (port) { 814 if (port) {
609 __skb_push(skb, sizeof(struct ethhdr)); 815 __skb_push(skb, sizeof(struct ethhdr));
@@ -612,8 +818,28 @@ static void br_multicast_send_query(struct net_bridge *br,
612 dev_queue_xmit); 818 dev_queue_xmit);
613 } else 819 } else
614 netif_rx(skb); 820 netif_rx(skb);
821}
822
823static void br_multicast_send_query(struct net_bridge *br,
824 struct net_bridge_port *port, u32 sent)
825{
826 unsigned long time;
827 struct br_ip br_group;
828
829 if (!netif_running(br->dev) || br->multicast_disabled ||
830 timer_pending(&br->multicast_querier_timer))
831 return;
832
833 memset(&br_group.u, 0, sizeof(br_group.u));
834
835 br_group.proto = htons(ETH_P_IP);
836 __br_multicast_send_query(br, port, &br_group);
837
838#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
839 br_group.proto = htons(ETH_P_IPV6);
840 __br_multicast_send_query(br, port, &br_group);
841#endif
615 842
616timer:
617 time = jiffies; 843 time = jiffies;
618 time += sent < br->multicast_startup_query_count ? 844 time += sent < br->multicast_startup_query_count ?
619 br->multicast_startup_query_interval : 845 br->multicast_startup_query_interval :
@@ -698,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
698 spin_unlock(&br->multicast_lock); 924 spin_unlock(&br->multicast_lock);
699} 925}
700 926
701static int br_multicast_igmp3_report(struct net_bridge *br, 927static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
702 struct net_bridge_port *port, 928 struct net_bridge_port *port,
703 struct sk_buff *skb) 929 struct sk_buff *skb)
704{ 930{
705 struct igmpv3_report *ih; 931 struct igmpv3_report *ih;
706 struct igmpv3_grec *grec; 932 struct igmpv3_grec *grec;
@@ -727,7 +953,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
727 group = grec->grec_mca; 953 group = grec->grec_mca;
728 type = grec->grec_type; 954 type = grec->grec_type;
729 955
730 len += grec->grec_nsrcs * 4; 956 len += ntohs(grec->grec_nsrcs) * 4;
731 if (!pskb_may_pull(skb, len)) 957 if (!pskb_may_pull(skb, len))
732 return -EINVAL; 958 return -EINVAL;
733 959
@@ -745,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
745 continue; 971 continue;
746 } 972 }
747 973
748 err = br_multicast_add_group(br, port, group); 974 err = br_ip4_multicast_add_group(br, port, group);
749 if (err) 975 if (err)
750 break; 976 break;
751 } 977 }
@@ -753,24 +979,87 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
753 return err; 979 return err;
754} 980}
755 981
982#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
983static int br_ip6_multicast_mld2_report(struct net_bridge *br,
984 struct net_bridge_port *port,
985 struct sk_buff *skb)
986{
987 struct icmp6hdr *icmp6h;
988 struct mld2_grec *grec;
989 int i;
990 int len;
991 int num;
992 int err = 0;
993
994 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
995 return -EINVAL;
996
997 icmp6h = icmp6_hdr(skb);
998 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
999 len = sizeof(*icmp6h);
1000
1001 for (i = 0; i < num; i++) {
1002 __be16 *nsrcs, _nsrcs;
1003
1004 nsrcs = skb_header_pointer(skb,
1005 len + offsetof(struct mld2_grec,
1006 grec_mca),
1007 sizeof(_nsrcs), &_nsrcs);
1008 if (!nsrcs)
1009 return -EINVAL;
1010
1011 if (!pskb_may_pull(skb,
1012 len + sizeof(*grec) +
1013 sizeof(struct in6_addr) * (*nsrcs)))
1014 return -EINVAL;
1015
1016 grec = (struct mld2_grec *)(skb->data + len);
1017 len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
1018
1019 /* We treat these as MLDv1 reports for now. */
1020 switch (grec->grec_type) {
1021 case MLD2_MODE_IS_INCLUDE:
1022 case MLD2_MODE_IS_EXCLUDE:
1023 case MLD2_CHANGE_TO_INCLUDE:
1024 case MLD2_CHANGE_TO_EXCLUDE:
1025 case MLD2_ALLOW_NEW_SOURCES:
1026 case MLD2_BLOCK_OLD_SOURCES:
1027 break;
1028
1029 default:
1030 continue;
1031 }
1032
1033 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
1034 if (!err)
1035 break;
1036 }
1037
1038 return err;
1039}
1040#endif
1041
1042/*
1043 * Add port to rotuer_list
1044 * list is maintained ordered by pointer value
1045 * and locked by br->multicast_lock and RCU
1046 */
756static void br_multicast_add_router(struct net_bridge *br, 1047static void br_multicast_add_router(struct net_bridge *br,
757 struct net_bridge_port *port) 1048 struct net_bridge_port *port)
758{ 1049{
759 struct hlist_node *p; 1050 struct net_bridge_port *p;
760 struct hlist_node **h; 1051 struct hlist_node *n, *slot = NULL;
761 1052
762 for (h = &br->router_list.first; 1053 hlist_for_each_entry(p, n, &br->router_list, rlist) {
763 (p = *h) && 1054 if ((unsigned long) port >= (unsigned long) p)
764 (unsigned long)container_of(p, struct net_bridge_port, rlist) > 1055 break;
765 (unsigned long)port; 1056 slot = n;
766 h = &p->next) 1057 }
767 ; 1058
768 1059 if (slot)
769 port->rlist.pprev = h; 1060 hlist_add_after_rcu(slot, &port->rlist);
770 port->rlist.next = p; 1061 else
771 rcu_assign_pointer(*h, &port->rlist); 1062 hlist_add_head_rcu(&port->rlist, &br->router_list);
772 if (p)
773 p->pprev = &port->rlist.next;
774} 1063}
775 1064
776static void br_multicast_mark_router(struct net_bridge *br, 1065static void br_multicast_mark_router(struct net_bridge *br,
@@ -800,7 +1089,7 @@ timer:
800 1089
801static void br_multicast_query_received(struct net_bridge *br, 1090static void br_multicast_query_received(struct net_bridge *br,
802 struct net_bridge_port *port, 1091 struct net_bridge_port *port,
803 __be32 saddr) 1092 int saddr)
804{ 1093{
805 if (saddr) 1094 if (saddr)
806 mod_timer(&br->multicast_querier_timer, 1095 mod_timer(&br->multicast_querier_timer,
@@ -811,9 +1100,9 @@ static void br_multicast_query_received(struct net_bridge *br,
811 br_multicast_mark_router(br, port); 1100 br_multicast_mark_router(br, port);
812} 1101}
813 1102
814static int br_multicast_query(struct net_bridge *br, 1103static int br_ip4_multicast_query(struct net_bridge *br,
815 struct net_bridge_port *port, 1104 struct net_bridge_port *port,
816 struct sk_buff *skb) 1105 struct sk_buff *skb)
817{ 1106{
818 struct iphdr *iph = ip_hdr(skb); 1107 struct iphdr *iph = ip_hdr(skb);
819 struct igmphdr *ih = igmp_hdr(skb); 1108 struct igmphdr *ih = igmp_hdr(skb);
@@ -831,7 +1120,7 @@ static int br_multicast_query(struct net_bridge *br,
831 (port && port->state == BR_STATE_DISABLED)) 1120 (port && port->state == BR_STATE_DISABLED))
832 goto out; 1121 goto out;
833 1122
834 br_multicast_query_received(br, port, iph->saddr); 1123 br_multicast_query_received(br, port, !!iph->saddr);
835 1124
836 group = ih->group; 1125 group = ih->group;
837 1126
@@ -859,7 +1148,7 @@ static int br_multicast_query(struct net_bridge *br,
859 if (!group) 1148 if (!group)
860 goto out; 1149 goto out;
861 1150
862 mp = br_mdb_ip_get(br->mdb, group); 1151 mp = br_mdb_ip4_get(br->mdb, group);
863 if (!mp) 1152 if (!mp)
864 goto out; 1153 goto out;
865 1154
@@ -883,9 +1172,78 @@ out:
883 return err; 1172 return err;
884} 1173}
885 1174
1175#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1176static int br_ip6_multicast_query(struct net_bridge *br,
1177 struct net_bridge_port *port,
1178 struct sk_buff *skb)
1179{
1180 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1181 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1182 struct net_bridge_mdb_entry *mp;
1183 struct mld2_query *mld2q;
1184 struct net_bridge_port_group *p, **pp;
1185 unsigned long max_delay;
1186 unsigned long now = jiffies;
1187 struct in6_addr *group = NULL;
1188 int err = 0;
1189
1190 spin_lock(&br->multicast_lock);
1191 if (!netif_running(br->dev) ||
1192 (port && port->state == BR_STATE_DISABLED))
1193 goto out;
1194
1195 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
1196
1197 if (skb->len == sizeof(*mld)) {
1198 if (!pskb_may_pull(skb, sizeof(*mld))) {
1199 err = -EINVAL;
1200 goto out;
1201 }
1202 mld = (struct mld_msg *) icmp6_hdr(skb);
1203 max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay));
1204 if (max_delay)
1205 group = &mld->mld_mca;
1206 } else if (skb->len >= sizeof(*mld2q)) {
1207 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1208 err = -EINVAL;
1209 goto out;
1210 }
1211 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1212 if (!mld2q->mld2q_nsrcs)
1213 group = &mld2q->mld2q_mca;
1214 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
1215 }
1216
1217 if (!group)
1218 goto out;
1219
1220 mp = br_mdb_ip6_get(br->mdb, group);
1221 if (!mp)
1222 goto out;
1223
1224 max_delay *= br->multicast_last_member_count;
1225 if (!hlist_unhashed(&mp->mglist) &&
1226 (timer_pending(&mp->timer) ?
1227 time_after(mp->timer.expires, now + max_delay) :
1228 try_to_del_timer_sync(&mp->timer) >= 0))
1229 mod_timer(&mp->timer, now + max_delay);
1230
1231 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
1232 if (timer_pending(&p->timer) ?
1233 time_after(p->timer.expires, now + max_delay) :
1234 try_to_del_timer_sync(&p->timer) >= 0)
1235 mod_timer(&mp->timer, now + max_delay);
1236 }
1237
1238out:
1239 spin_unlock(&br->multicast_lock);
1240 return err;
1241}
1242#endif
1243
886static void br_multicast_leave_group(struct net_bridge *br, 1244static void br_multicast_leave_group(struct net_bridge *br,
887 struct net_bridge_port *port, 1245 struct net_bridge_port *port,
888 __be32 group) 1246 struct br_ip *group)
889{ 1247{
890 struct net_bridge_mdb_htable *mdb; 1248 struct net_bridge_mdb_htable *mdb;
891 struct net_bridge_mdb_entry *mp; 1249 struct net_bridge_mdb_entry *mp;
@@ -893,9 +1251,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
893 unsigned long now; 1251 unsigned long now;
894 unsigned long time; 1252 unsigned long time;
895 1253
896 if (ipv4_is_local_multicast(group))
897 return;
898
899 spin_lock(&br->multicast_lock); 1254 spin_lock(&br->multicast_lock);
900 if (!netif_running(br->dev) || 1255 if (!netif_running(br->dev) ||
901 (port && port->state == BR_STATE_DISABLED) || 1256 (port && port->state == BR_STATE_DISABLED) ||
@@ -946,6 +1301,38 @@ out:
946 spin_unlock(&br->multicast_lock); 1301 spin_unlock(&br->multicast_lock);
947} 1302}
948 1303
1304static void br_ip4_multicast_leave_group(struct net_bridge *br,
1305 struct net_bridge_port *port,
1306 __be32 group)
1307{
1308 struct br_ip br_group;
1309
1310 if (ipv4_is_local_multicast(group))
1311 return;
1312
1313 br_group.u.ip4 = group;
1314 br_group.proto = htons(ETH_P_IP);
1315
1316 br_multicast_leave_group(br, port, &br_group);
1317}
1318
1319#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1320static void br_ip6_multicast_leave_group(struct net_bridge *br,
1321 struct net_bridge_port *port,
1322 const struct in6_addr *group)
1323{
1324 struct br_ip br_group;
1325
1326 if (ipv6_is_local_multicast(group))
1327 return;
1328
1329 ipv6_addr_copy(&br_group.u.ip6, group);
1330 br_group.proto = htons(ETH_P_IPV6);
1331
1332 br_multicast_leave_group(br, port, &br_group);
1333}
1334#endif
1335
949static int br_multicast_ipv4_rcv(struct net_bridge *br, 1336static int br_multicast_ipv4_rcv(struct net_bridge *br,
950 struct net_bridge_port *port, 1337 struct net_bridge_port *port,
951 struct sk_buff *skb) 1338 struct sk_buff *skb)
@@ -957,9 +1344,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
957 unsigned offset; 1344 unsigned offset;
958 int err; 1345 int err;
959 1346
960 BR_INPUT_SKB_CB(skb)->igmp = 0;
961 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
962
963 /* We treat OOM as packet loss for now. */ 1347 /* We treat OOM as packet loss for now. */
964 if (!pskb_may_pull(skb, sizeof(*iph))) 1348 if (!pskb_may_pull(skb, sizeof(*iph)))
965 return -EINVAL; 1349 return -EINVAL;
@@ -1023,16 +1407,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1023 case IGMP_HOST_MEMBERSHIP_REPORT: 1407 case IGMP_HOST_MEMBERSHIP_REPORT:
1024 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1408 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1025 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1409 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
1026 err = br_multicast_add_group(br, port, ih->group); 1410 err = br_ip4_multicast_add_group(br, port, ih->group);
1027 break; 1411 break;
1028 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1412 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1029 err = br_multicast_igmp3_report(br, port, skb2); 1413 err = br_ip4_multicast_igmp3_report(br, port, skb2);
1030 break; 1414 break;
1031 case IGMP_HOST_MEMBERSHIP_QUERY: 1415 case IGMP_HOST_MEMBERSHIP_QUERY:
1032 err = br_multicast_query(br, port, skb2); 1416 err = br_ip4_multicast_query(br, port, skb2);
1033 break; 1417 break;
1034 case IGMP_HOST_LEAVE_MESSAGE: 1418 case IGMP_HOST_LEAVE_MESSAGE:
1035 br_multicast_leave_group(br, port, ih->group); 1419 br_ip4_multicast_leave_group(br, port, ih->group);
1036 break; 1420 break;
1037 } 1421 }
1038 1422
@@ -1044,15 +1428,139 @@ err_out:
1044 return err; 1428 return err;
1045} 1429}
1046 1430
1431#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1432static int br_multicast_ipv6_rcv(struct net_bridge *br,
1433 struct net_bridge_port *port,
1434 struct sk_buff *skb)
1435{
1436 struct sk_buff *skb2 = skb;
1437 struct ipv6hdr *ip6h;
1438 struct icmp6hdr *icmp6h;
1439 u8 nexthdr;
1440 unsigned len;
1441 unsigned offset;
1442 int err;
1443
1444 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1445 return -EINVAL;
1446
1447 ip6h = ipv6_hdr(skb);
1448
1449 /*
1450 * We're interested in MLD messages only.
1451 * - Version is 6
1452 * - MLD has always Router Alert hop-by-hop option
1453 * - But we do not support jumbrograms.
1454 */
1455 if (ip6h->version != 6 ||
1456 ip6h->nexthdr != IPPROTO_HOPOPTS ||
1457 ip6h->payload_len == 0)
1458 return 0;
1459
1460 len = ntohs(ip6h->payload_len);
1461 if (skb->len < len)
1462 return -EINVAL;
1463
1464 nexthdr = ip6h->nexthdr;
1465 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
1466
1467 if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
1468 return 0;
1469
1470 /* Okay, we found ICMPv6 header */
1471 skb2 = skb_clone(skb, GFP_ATOMIC);
1472 if (!skb2)
1473 return -ENOMEM;
1474
1475 len -= offset - skb_network_offset(skb2);
1476
1477 __skb_pull(skb2, offset);
1478 skb_reset_transport_header(skb2);
1479
1480 err = -EINVAL;
1481 if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
1482 goto out;
1483
1484 icmp6h = icmp6_hdr(skb2);
1485
1486 switch (icmp6h->icmp6_type) {
1487 case ICMPV6_MGM_QUERY:
1488 case ICMPV6_MGM_REPORT:
1489 case ICMPV6_MGM_REDUCTION:
1490 case ICMPV6_MLD2_REPORT:
1491 break;
1492 default:
1493 err = 0;
1494 goto out;
1495 }
1496
1497 /* Okay, we found MLD message. Check further. */
1498 if (skb2->len > len) {
1499 err = pskb_trim_rcsum(skb2, len);
1500 if (err)
1501 goto out;
1502 }
1503
1504 switch (skb2->ip_summed) {
1505 case CHECKSUM_COMPLETE:
1506 if (!csum_fold(skb2->csum))
1507 break;
1508 /*FALLTHROUGH*/
1509 case CHECKSUM_NONE:
1510 skb2->csum = 0;
1511 if (skb_checksum_complete(skb2))
1512 goto out;
1513 }
1514
1515 err = 0;
1516
1517 BR_INPUT_SKB_CB(skb)->igmp = 1;
1518
1519 switch (icmp6h->icmp6_type) {
1520 case ICMPV6_MGM_REPORT:
1521 {
1522 struct mld_msg *mld = (struct mld_msg *)icmp6h;
1523 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
1524 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1525 break;
1526 }
1527 case ICMPV6_MLD2_REPORT:
1528 err = br_ip6_multicast_mld2_report(br, port, skb2);
1529 break;
1530 case ICMPV6_MGM_QUERY:
1531 err = br_ip6_multicast_query(br, port, skb2);
1532 break;
1533 case ICMPV6_MGM_REDUCTION:
1534 {
1535 struct mld_msg *mld = (struct mld_msg *)icmp6h;
1536 br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
1537 }
1538 }
1539
1540out:
1541 __skb_push(skb2, offset);
1542 if (skb2 != skb)
1543 kfree_skb(skb2);
1544 return err;
1545}
1546#endif
1547
1047int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1548int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1048 struct sk_buff *skb) 1549 struct sk_buff *skb)
1049{ 1550{
1551 BR_INPUT_SKB_CB(skb)->igmp = 0;
1552 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1553
1050 if (br->multicast_disabled) 1554 if (br->multicast_disabled)
1051 return 0; 1555 return 0;
1052 1556
1053 switch (skb->protocol) { 1557 switch (skb->protocol) {
1054 case htons(ETH_P_IP): 1558 case htons(ETH_P_IP):
1055 return br_multicast_ipv4_rcv(br, port, skb); 1559 return br_multicast_ipv4_rcv(br, port, skb);
1560#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1561 case htons(ETH_P_IPV6):
1562 return br_multicast_ipv6_rcv(br, port, skb);
1563#endif
1056 } 1564 }
1057 1565
1058 return 0; 1566 return 0;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 791d4ab0fd4d..3d2d3fe0a97e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -45,6 +45,17 @@ struct mac_addr
45 unsigned char addr[6]; 45 unsigned char addr[6];
46}; 46};
47 47
48struct br_ip
49{
50 union {
51 __be32 ip4;
52#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
53 struct in6_addr ip6;
54#endif
55 } u;
56 __be16 proto;
57};
58
48struct net_bridge_fdb_entry 59struct net_bridge_fdb_entry
49{ 60{
50 struct hlist_node hlist; 61 struct hlist_node hlist;
@@ -64,7 +75,7 @@ struct net_bridge_port_group {
64 struct rcu_head rcu; 75 struct rcu_head rcu;
65 struct timer_list timer; 76 struct timer_list timer;
66 struct timer_list query_timer; 77 struct timer_list query_timer;
67 __be32 addr; 78 struct br_ip addr;
68 u32 queries_sent; 79 u32 queries_sent;
69}; 80};
70 81
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry
77 struct rcu_head rcu; 88 struct rcu_head rcu;
78 struct timer_list timer; 89 struct timer_list timer;
79 struct timer_list query_timer; 90 struct timer_list query_timer;
80 __be32 addr; 91 struct br_ip addr;
81 u32 queries_sent; 92 u32 queries_sent;
82}; 93};
83 94
@@ -130,19 +141,20 @@ struct net_bridge_port
130#endif 141#endif
131}; 142};
132 143
144struct br_cpu_netstats {
145 unsigned long rx_packets;
146 unsigned long rx_bytes;
147 unsigned long tx_packets;
148 unsigned long tx_bytes;
149};
150
133struct net_bridge 151struct net_bridge
134{ 152{
135 spinlock_t lock; 153 spinlock_t lock;
136 struct list_head port_list; 154 struct list_head port_list;
137 struct net_device *dev; 155 struct net_device *dev;
138 156
139 struct br_cpu_netstats __percpu { 157 struct br_cpu_netstats __percpu *stats;
140 unsigned long rx_packets;
141 unsigned long rx_bytes;
142 unsigned long tx_packets;
143 unsigned long tx_bytes;
144 } *stats;
145
146 spinlock_t hash_lock; 158 spinlock_t hash_lock;
147 struct hlist_head hash[BR_HASH_SIZE]; 159 struct hlist_head hash[BR_HASH_SIZE];
148 unsigned long feature_mask; 160 unsigned long feature_mask;
@@ -241,6 +253,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
241extern void br_dev_setup(struct net_device *dev); 253extern void br_dev_setup(struct net_device *dev);
242extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, 254extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
243 struct net_device *dev); 255 struct net_device *dev);
256extern bool br_devices_support_netpoll(struct net_bridge *br);
257extern void br_netpoll_cleanup(struct net_device *br_dev);
244 258
245/* br_fdb.c */ 259/* br_fdb.c */
246extern int br_fdb_init(void); 260extern int br_fdb_init(void);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e84837e1bc86..024fd5bb2d39 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -247,10 +247,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
247 247
248 switch (caifdev->link_select) { 248 switch (caifdev->link_select) {
249 case CAIF_LINK_HIGH_BANDW: 249 case CAIF_LINK_HIGH_BANDW:
250 pref = CFPHYPREF_LOW_LAT; 250 pref = CFPHYPREF_HIGH_BW;
251 break; 251 break;
252 case CAIF_LINK_LOW_LATENCY: 252 case CAIF_LINK_LOW_LATENCY:
253 pref = CFPHYPREF_HIGH_BW; 253 pref = CFPHYPREF_LOW_LAT;
254 break; 254 break;
255 default: 255 default:
256 pref = CFPHYPREF_HIGH_BW; 256 pref = CFPHYPREF_HIGH_BW;
@@ -330,23 +330,28 @@ int caif_connect_client(struct caif_connect_request *conn_req,
330 struct cflayer *client_layer) 330 struct cflayer *client_layer)
331{ 331{
332 struct cfctrl_link_param param; 332 struct cfctrl_link_param param;
333 if (connect_req_to_link_param(get_caif_conf(), conn_req, &param) == 0) 333 int ret;
334 /* Hook up the adaptation layer. */ 334 ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
335 return cfcnfg_add_adaptation_layer(get_caif_conf(), 335 if (ret)
336 return ret;
337 /* Hook up the adaptation layer. */
338 return cfcnfg_add_adaptation_layer(get_caif_conf(),
336 &param, client_layer); 339 &param, client_layer);
337
338 return -EINVAL;
339
340 caif_assert(0);
341} 340}
342EXPORT_SYMBOL(caif_connect_client); 341EXPORT_SYMBOL(caif_connect_client);
343 342
344int caif_disconnect_client(struct cflayer *adap_layer) 343int caif_disconnect_client(struct cflayer *adap_layer)
345{ 344{
346 return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer); 345 return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
347} 346}
348EXPORT_SYMBOL(caif_disconnect_client); 347EXPORT_SYMBOL(caif_disconnect_client);
349 348
349void caif_release_client(struct cflayer *adap_layer)
350{
351 cfcnfg_release_adap_layer(adap_layer);
352}
353EXPORT_SYMBOL(caif_release_client);
354
350/* Per-namespace Caif devices handling */ 355/* Per-namespace Caif devices handling */
351static int caif_init_net(struct net *net) 356static int caif_init_net(struct net *net)
352{ 357{
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index cdf62b9fefac..c3a70c5c893a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * Per Sigmond per.sigmond@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
6 */ 5 */
7 6
@@ -16,91 +15,52 @@
16#include <linux/poll.h> 15#include <linux/poll.h>
17#include <linux/tcp.h> 16#include <linux/tcp.h>
18#include <linux/uaccess.h> 17#include <linux/uaccess.h>
19#include <asm/atomic.h> 18#include <linux/mutex.h>
20 19#include <linux/debugfs.h>
21#include <linux/caif/caif_socket.h> 20#include <linux/caif/caif_socket.h>
21#include <asm/atomic.h>
22#include <net/sock.h>
23#include <net/tcp_states.h>
22#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
23#include <net/caif/caif_dev.h> 25#include <net/caif/caif_dev.h>
24#include <net/caif/cfpkt.h> 26#include <net/caif/cfpkt.h>
25 27
26MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
29MODULE_ALIAS_NETPROTO(AF_CAIF);
30
31#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
32#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
33
34/*
35 * CAIF state is re-using the TCP socket states.
36 * caif_states stored in sk_state reflect the state as reported by
37 * the CAIF stack, while sk_socket->state is the state of the socket.
38 */
39enum caif_states {
40 CAIF_CONNECTED = TCP_ESTABLISHED,
41 CAIF_CONNECTING = TCP_SYN_SENT,
42 CAIF_DISCONNECTED = TCP_CLOSE
43};
44
45#define TX_FLOW_ON_BIT 1
46#define RX_FLOW_ON_BIT 2
27 47
28#define CHNL_SKT_READ_QUEUE_HIGH 200
29#define CHNL_SKT_READ_QUEUE_LOW 100
30
31static int caif_sockbuf_size = 40000;
32static atomic_t caif_nr_socks = ATOMIC_INIT(0);
33
34#define CONN_STATE_OPEN_BIT 1
35#define CONN_STATE_PENDING_BIT 2
36#define CONN_STATE_PEND_DESTROY_BIT 3
37#define CONN_REMOTE_SHUTDOWN_BIT 4
38
39#define TX_FLOW_ON_BIT 1
40#define RX_FLOW_ON_BIT 2
41
42#define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\
43 (void *) &(cf_sk)->conn_state)
44#define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
45 (void *) &(cf_sk)->conn_state)
46#define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\
47 (void *) &(cf_sk)->conn_state)
48#define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\
49 (void *) &(cf_sk)->conn_state)
50
51#define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\
52 (void *) &(cf_sk)->conn_state)
53#define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\
54 (void *) &(cf_sk)->conn_state)
55#define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\
56 (void *) &(cf_sk)->conn_state)
57#define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\
58 (void *) &(cf_sk)->conn_state)
59#define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\
60 (void *) &(cf_sk)->conn_state)
61#define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
62 (void *) &(cf_sk)->conn_state)
63
64#define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
65 (void *) &(dev)->conn_state)
66#define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\
67 (void *) &(cf_sk)->flow_state)
68#define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\
69 (void *) &(cf_sk)->flow_state)
70
71#define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\
72 (void *) &(cf_sk)->flow_state)
73#define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\
74 (void *) &(cf_sk)->flow_state)
75#define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\
76 (void *) &(cf_sk)->flow_state)
77#define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\
78 (void *) &(cf_sk)->flow_state)
79
80#define SKT_READ_FLAG 0x01
81#define SKT_WRITE_FLAG 0x02
82static struct dentry *debugfsdir; 48static struct dentry *debugfsdir;
83#include <linux/debugfs.h>
84 49
85#ifdef CONFIG_DEBUG_FS 50#ifdef CONFIG_DEBUG_FS
86struct debug_fs_counter { 51struct debug_fs_counter {
87 atomic_t num_open; 52 atomic_t caif_nr_socks;
88 atomic_t num_close; 53 atomic_t num_connect_req;
89 atomic_t num_init; 54 atomic_t num_connect_resp;
90 atomic_t num_init_resp; 55 atomic_t num_connect_fail_resp;
91 atomic_t num_init_fail_resp; 56 atomic_t num_disconnect;
92 atomic_t num_deinit;
93 atomic_t num_deinit_resp;
94 atomic_t num_remote_shutdown_ind; 57 atomic_t num_remote_shutdown_ind;
95 atomic_t num_tx_flow_off_ind; 58 atomic_t num_tx_flow_off_ind;
96 atomic_t num_tx_flow_on_ind; 59 atomic_t num_tx_flow_on_ind;
97 atomic_t num_rx_flow_off; 60 atomic_t num_rx_flow_off;
98 atomic_t num_rx_flow_on; 61 atomic_t num_rx_flow_on;
99 atomic_t skb_in_use;
100 atomic_t skb_alloc;
101 atomic_t skb_free;
102}; 62};
103static struct debug_fs_counter cnt; 63struct debug_fs_counter cnt;
104#define dbfs_atomic_inc(v) atomic_inc(v) 64#define dbfs_atomic_inc(v) atomic_inc(v)
105#define dbfs_atomic_dec(v) atomic_dec(v) 65#define dbfs_atomic_dec(v) atomic_dec(v)
106#else 66#else
@@ -108,624 +68,666 @@ static struct debug_fs_counter cnt;
108#define dbfs_atomic_dec(v) 68#define dbfs_atomic_dec(v)
109#endif 69#endif
110 70
111/* The AF_CAIF socket */
112struct caifsock { 71struct caifsock {
113 /* NOTE: sk has to be the first member */ 72 struct sock sk; /* must be first member */
114 struct sock sk;
115 struct cflayer layer; 73 struct cflayer layer;
116 char name[CAIF_LAYER_NAME_SZ]; 74 char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
117 u32 conn_state;
118 u32 flow_state; 75 u32 flow_state;
119 struct cfpktq *pktq;
120 int file_mode;
121 struct caif_connect_request conn_req; 76 struct caif_connect_request conn_req;
122 int read_queue_len; 77 struct mutex readlock;
123 /* protect updates of read_queue_len */
124 spinlock_t read_queue_len_lock;
125 struct dentry *debugfs_socket_dir; 78 struct dentry *debugfs_socket_dir;
126}; 79};
127 80
128static void drain_queue(struct caifsock *cf_sk); 81static int rx_flow_is_on(struct caifsock *cf_sk)
82{
83 return test_bit(RX_FLOW_ON_BIT,
84 (void *) &cf_sk->flow_state);
85}
86
87static int tx_flow_is_on(struct caifsock *cf_sk)
88{
89 return test_bit(TX_FLOW_ON_BIT,
90 (void *) &cf_sk->flow_state);
91}
129 92
130/* Packet Receive Callback function called from CAIF Stack */ 93static void set_rx_flow_off(struct caifsock *cf_sk)
131static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
132{ 94{
133 struct caifsock *cf_sk; 95 clear_bit(RX_FLOW_ON_BIT,
134 int read_queue_high; 96 (void *) &cf_sk->flow_state);
135 cf_sk = container_of(layr, struct caifsock, layer); 97}
136 98
137 if (!STATE_IS_OPEN(cf_sk)) { 99static void set_rx_flow_on(struct caifsock *cf_sk)
138 /*FIXME: This should be allowed finally!*/ 100{
139 pr_debug("CAIF: %s(): called after close request\n", __func__); 101 set_bit(RX_FLOW_ON_BIT,
140 cfpkt_destroy(pkt); 102 (void *) &cf_sk->flow_state);
141 return 0; 103}
142 }
143 /* NOTE: This function may be called in Tasklet context! */
144 104
145 /* The queue has its own lock */ 105static void set_tx_flow_off(struct caifsock *cf_sk)
146 cfpkt_queue(cf_sk->pktq, pkt, 0); 106{
107 clear_bit(TX_FLOW_ON_BIT,
108 (void *) &cf_sk->flow_state);
109}
147 110
148 spin_lock(&cf_sk->read_queue_len_lock); 111static void set_tx_flow_on(struct caifsock *cf_sk)
149 cf_sk->read_queue_len++; 112{
113 set_bit(TX_FLOW_ON_BIT,
114 (void *) &cf_sk->flow_state);
115}
150 116
151 read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH); 117static void caif_read_lock(struct sock *sk)
152 spin_unlock(&cf_sk->read_queue_len_lock); 118{
119 struct caifsock *cf_sk;
120 cf_sk = container_of(sk, struct caifsock, sk);
121 mutex_lock(&cf_sk->readlock);
122}
153 123
154 if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) { 124static void caif_read_unlock(struct sock *sk)
155 dbfs_atomic_inc(&cnt.num_rx_flow_off); 125{
156 SET_RX_FLOW_OFF(cf_sk); 126 struct caifsock *cf_sk;
127 cf_sk = container_of(sk, struct caifsock, sk);
128 mutex_unlock(&cf_sk->readlock);
129}
157 130
158 /* Send flow off (NOTE: must not sleep) */ 131int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
159 pr_debug("CAIF: %s():" 132{
160 " sending flow OFF (queue len = %d)\n", 133 /* A quarter of full buffer is used a low water mark */
161 __func__, 134 return cf_sk->sk.sk_rcvbuf / 4;
162 cf_sk->read_queue_len); 135}
163 caif_assert(cf_sk->layer.dn);
164 caif_assert(cf_sk->layer.dn->ctrlcmd);
165 136
166 (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 137void caif_flow_ctrl(struct sock *sk, int mode)
167 CAIF_MODEMCMD_FLOW_OFF_REQ); 138{
168 } 139 struct caifsock *cf_sk;
140 cf_sk = container_of(sk, struct caifsock, sk);
141 if (cf_sk->layer.dn)
142 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
143}
169 144
170 /* Signal reader that data is available. */ 145/*
146 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
147 * not dropped, but CAIF is sending flow off instead.
148 */
149int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
150{
151 int err;
152 int skb_len;
153 unsigned long flags;
154 struct sk_buff_head *list = &sk->sk_receive_queue;
155 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
171 156
172 wake_up_interruptible(cf_sk->sk.sk_sleep); 157 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
158 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
159 trace_printk("CAIF: %s():"
160 " sending flow OFF (queue len = %d %d)\n",
161 __func__,
162 atomic_read(&cf_sk->sk.sk_rmem_alloc),
163 sk_rcvbuf_lowwater(cf_sk));
164 set_rx_flow_off(cf_sk);
165 if (cf_sk->layer.dn)
166 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
167 CAIF_MODEMCMD_FLOW_OFF_REQ);
168 }
173 169
170 err = sk_filter(sk, skb);
171 if (err)
172 return err;
173 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
174 set_rx_flow_off(cf_sk);
175 trace_printk("CAIF: %s():"
176 " sending flow OFF due to rmem_schedule\n",
177 __func__);
178 if (cf_sk->layer.dn)
179 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
180 CAIF_MODEMCMD_FLOW_OFF_REQ);
181 }
182 skb->dev = NULL;
183 skb_set_owner_r(skb, sk);
184 /* Cache the SKB length before we tack it onto the receive
185 * queue. Once it is added it no longer belongs to us and
186 * may be freed by other threads of control pulling packets
187 * from the queue.
188 */
189 skb_len = skb->len;
190 spin_lock_irqsave(&list->lock, flags);
191 if (!sock_flag(sk, SOCK_DEAD))
192 __skb_queue_tail(list, skb);
193 spin_unlock_irqrestore(&list->lock, flags);
194
195 if (!sock_flag(sk, SOCK_DEAD))
196 sk->sk_data_ready(sk, skb_len);
197 else
198 kfree_skb(skb);
174 return 0; 199 return 0;
175} 200}
176 201
177/* Packet Flow Control Callback function called from CAIF */ 202/* Packet Receive Callback function called from CAIF Stack */
178static void caif_sktflowctrl_cb(struct cflayer *layr, 203static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
179 enum caif_ctrlcmd flow,
180 int phyid)
181{ 204{
182 struct caifsock *cf_sk; 205 struct caifsock *cf_sk;
183 206 struct sk_buff *skb;
184 /* NOTE: This function may be called in Tasklet context! */
185 pr_debug("CAIF: %s(): flowctrl func called: %s.\n",
186 __func__,
187 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
188 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
189 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
190 flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
191 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
192 flow ==
193 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" :
194 "UKNOWN CTRL COMMAND");
195
196 if (layr == NULL)
197 return;
198 207
199 cf_sk = container_of(layr, struct caifsock, layer); 208 cf_sk = container_of(layr, struct caifsock, layer);
209 skb = cfpkt_tonative(pkt);
210
211 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
212 cfpkt_destroy(pkt);
213 return 0;
214 }
215 caif_queue_rcv_skb(&cf_sk->sk, skb);
216 return 0;
217}
200 218
219/* Packet Control Callback function called from CAIF */
220static void caif_ctrl_cb(struct cflayer *layr,
221 enum caif_ctrlcmd flow,
222 int phyid)
223{
224 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
201 switch (flow) { 225 switch (flow) {
202 case CAIF_CTRLCMD_FLOW_ON_IND: 226 case CAIF_CTRLCMD_FLOW_ON_IND:
227 /* OK from modem to start sending again */
203 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); 228 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
204 /* Signal reader that data is available. */ 229 set_tx_flow_on(cf_sk);
205 SET_TX_FLOW_ON(cf_sk); 230 cf_sk->sk.sk_state_change(&cf_sk->sk);
206 wake_up_interruptible(cf_sk->sk.sk_sleep);
207 break; 231 break;
208 232
209 case CAIF_CTRLCMD_FLOW_OFF_IND: 233 case CAIF_CTRLCMD_FLOW_OFF_IND:
234 /* Modem asks us to shut up */
210 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); 235 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
211 SET_TX_FLOW_OFF(cf_sk); 236 set_tx_flow_off(cf_sk);
237 cf_sk->sk.sk_state_change(&cf_sk->sk);
212 break; 238 break;
213 239
214 case CAIF_CTRLCMD_INIT_RSP: 240 case CAIF_CTRLCMD_INIT_RSP:
215 dbfs_atomic_inc(&cnt.num_init_resp); 241 /* We're now connected */
216 /* Signal reader that data is available. */ 242 dbfs_atomic_inc(&cnt.num_connect_resp);
217 caif_assert(STATE_IS_OPEN(cf_sk)); 243 cf_sk->sk.sk_state = CAIF_CONNECTED;
218 SET_PENDING_OFF(cf_sk); 244 set_tx_flow_on(cf_sk);
219 SET_TX_FLOW_ON(cf_sk); 245 cf_sk->sk.sk_state_change(&cf_sk->sk);
220 wake_up_interruptible(cf_sk->sk.sk_sleep);
221 break; 246 break;
222 247
223 case CAIF_CTRLCMD_DEINIT_RSP: 248 case CAIF_CTRLCMD_DEINIT_RSP:
224 dbfs_atomic_inc(&cnt.num_deinit_resp); 249 /* We're now disconnected */
225 caif_assert(!STATE_IS_OPEN(cf_sk)); 250 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
226 SET_PENDING_OFF(cf_sk); 251 cf_sk->sk.sk_state_change(&cf_sk->sk);
227 if (!STATE_IS_PENDING_DESTROY(cf_sk)) { 252 cfcnfg_release_adap_layer(&cf_sk->layer);
228 if (cf_sk->sk.sk_sleep != NULL)
229 wake_up_interruptible(cf_sk->sk.sk_sleep);
230 }
231 dbfs_atomic_inc(&cnt.num_deinit);
232 sock_put(&cf_sk->sk);
233 break; 253 break;
234 254
235 case CAIF_CTRLCMD_INIT_FAIL_RSP: 255 case CAIF_CTRLCMD_INIT_FAIL_RSP:
236 dbfs_atomic_inc(&cnt.num_init_fail_resp); 256 /* Connect request failed */
237 caif_assert(STATE_IS_OPEN(cf_sk)); 257 dbfs_atomic_inc(&cnt.num_connect_fail_resp);
238 SET_STATE_CLOSED(cf_sk); 258 cf_sk->sk.sk_err = ECONNREFUSED;
239 SET_PENDING_OFF(cf_sk); 259 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
240 SET_TX_FLOW_OFF(cf_sk); 260 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
241 wake_up_interruptible(cf_sk->sk.sk_sleep); 261 /*
262 * Socket "standards" seems to require POLLOUT to
263 * be set at connect failure.
264 */
265 set_tx_flow_on(cf_sk);
266 cf_sk->sk.sk_state_change(&cf_sk->sk);
242 break; 267 break;
243 268
244 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: 269 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
270 /* Modem has closed this connection, or device is down. */
245 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); 271 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
246 SET_REMOTE_SHUTDOWN(cf_sk); 272 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
247 /* Use sk_shutdown to indicate remote shutdown indication */ 273 cf_sk->sk.sk_err = ECONNRESET;
248 cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN; 274 set_rx_flow_on(cf_sk);
249 cf_sk->file_mode = 0; 275 cf_sk->sk.sk_error_report(&cf_sk->sk);
250 wake_up_interruptible(cf_sk->sk.sk_sleep);
251 break; 276 break;
252 277
253 default: 278 default:
254 pr_debug("CAIF: %s(): Unexpected flow command %d\n", 279 pr_debug("CAIF: %s(): Unexpected flow command %d\n",
255 __func__, flow); 280 __func__, flow);
256 } 281 }
257} 282}
258 283
259static void skb_destructor(struct sk_buff *skb) 284static void caif_check_flow_release(struct sock *sk)
260{ 285{
261 dbfs_atomic_inc(&cnt.skb_free); 286 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
262 dbfs_atomic_dec(&cnt.skb_in_use);
263}
264 287
288 if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
289 return;
290 if (rx_flow_is_on(cf_sk))
291 return;
265 292
266static int caif_recvmsg(struct kiocb *iocb, struct socket *sock, 293 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
294 dbfs_atomic_inc(&cnt.num_rx_flow_on);
295 set_rx_flow_on(cf_sk);
296 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
297 CAIF_MODEMCMD_FLOW_ON_REQ);
298 }
299}
300/*
301 * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
302 * has sufficient size.
303 */
304
305static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
267 struct msghdr *m, size_t buf_len, int flags) 306 struct msghdr *m, size_t buf_len, int flags)
268 307
269{ 308{
270 struct sock *sk = sock->sk; 309 struct sock *sk = sock->sk;
271 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
272 struct cfpkt *pkt = NULL;
273 size_t len;
274 int result;
275 struct sk_buff *skb; 310 struct sk_buff *skb;
276 ssize_t ret = -EIO; 311 int ret = 0;
277 int read_queue_low; 312 int len;
278
279 if (cf_sk == NULL) {
280 pr_debug("CAIF: %s(): private_data not set!\n",
281 __func__);
282 ret = -EBADFD;
283 goto read_error;
284 }
285
286 /* Don't do multiple iovec entries yet */
287 if (m->msg_iovlen != 1)
288 return -EOPNOTSUPP;
289 313
290 if (unlikely(!buf_len)) 314 if (unlikely(!buf_len))
291 return -EINVAL; 315 return -EINVAL;
292 316
293 lock_sock(&(cf_sk->sk)); 317 skb = skb_recv_datagram(sk, flags, 0 , &ret);
294 318 if (!skb)
295 caif_assert(cf_sk->pktq);
296
297 if (!STATE_IS_OPEN(cf_sk)) {
298 /* Socket is closed or closing. */
299 if (!STATE_IS_PENDING(cf_sk)) {
300 pr_debug("CAIF: %s(): socket is closed (by remote)\n",
301 __func__);
302 ret = -EPIPE;
303 } else {
304 pr_debug("CAIF: %s(): socket is closing..\n", __func__);
305 ret = -EBADF;
306 }
307 goto read_error; 319 goto read_error;
308 }
309 /* Socket is open or opening. */
310 if (STATE_IS_PENDING(cf_sk)) {
311 pr_debug("CAIF: %s(): socket is opening...\n", __func__);
312
313 if (flags & MSG_DONTWAIT) {
314 /* We can't block. */
315 pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n",
316 __func__);
317 ret = -EAGAIN;
318 goto read_error;
319 }
320 320
321 len = skb->len;
322
323 if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
324 len = buf_len;
321 /* 325 /*
322 * Blocking mode; state is pending and we need to wait 326 * Push skb back on receive queue if buffer too small.
323 * for its conclusion. 327 * This has a built-in race where multi-threaded receive
328 * may get packet in wrong order, but multiple read does
329 * not really guarantee ordered delivery anyway.
330 * Let's optimize for speed without taking locks.
324 */ 331 */
325 release_sock(&cf_sk->sk);
326
327 result =
328 wait_event_interruptible(*cf_sk->sk.sk_sleep,
329 !STATE_IS_PENDING(cf_sk));
330 332
331 lock_sock(&(cf_sk->sk)); 333 skb_queue_head(&sk->sk_receive_queue, skb);
332 334 ret = -EMSGSIZE;
333 if (result == -ERESTARTSYS) { 335 goto read_error;
334 pr_debug("CAIF: %s(): wait_event_interruptible"
335 " woken by a signal (1)", __func__);
336 ret = -ERESTARTSYS;
337 goto read_error;
338 }
339 } 336 }
340 337
341 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || 338 ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
342 !STATE_IS_OPEN(cf_sk) || 339 if (ret)
343 STATE_IS_PENDING(cf_sk)) {
344
345 pr_debug("CAIF: %s(): socket closed\n",
346 __func__);
347 ret = -ESHUTDOWN;
348 goto read_error; 340 goto read_error;
349 }
350 341
351 /* 342 skb_free_datagram(sk, skb);
352 * Block if we don't have any received buffers.
353 * The queue has its own lock.
354 */
355 while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) {
356 343
357 if (flags & MSG_DONTWAIT) { 344 caif_check_flow_release(sk);
358 pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__);
359 ret = -EAGAIN;
360 goto read_error;
361 }
362 trace_printk("CAIF: %s() wait_event\n", __func__);
363 345
364 /* Let writers in. */ 346 return len;
365 release_sock(&cf_sk->sk);
366 347
367 /* Block reader until data arrives or socket is closed. */ 348read_error:
368 if (wait_event_interruptible(*cf_sk->sk.sk_sleep, 349 return ret;
369 cfpkt_qpeek(cf_sk->pktq) 350}
370 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
371 || !STATE_IS_OPEN(cf_sk)) ==
372 -ERESTARTSYS) {
373 pr_debug("CAIF: %s():"
374 " wait_event_interruptible woken by "
375 "a signal, signal_pending(current) = %d\n",
376 __func__,
377 signal_pending(current));
378 return -ERESTARTSYS;
379 }
380 351
381 trace_printk("CAIF: %s() awake\n", __func__);
382 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
383 pr_debug("CAIF: %s(): "
384 "received remote_shutdown indication\n",
385 __func__);
386 ret = -ESHUTDOWN;
387 goto read_error_no_unlock;
388 }
389 352
390 /* I want to be alone on cf_sk (except status and queue). */ 353/* Copied from unix_stream_wait_data, identical except for lock call. */
391 lock_sock(&(cf_sk->sk)); 354static long caif_stream_data_wait(struct sock *sk, long timeo)
355{
356 DEFINE_WAIT(wait);
357 lock_sock(sk);
358
359 for (;;) {
360 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
361
362 if (!skb_queue_empty(&sk->sk_receive_queue) ||
363 sk->sk_err ||
364 sk->sk_state != CAIF_CONNECTED ||
365 sock_flag(sk, SOCK_DEAD) ||
366 (sk->sk_shutdown & RCV_SHUTDOWN) ||
367 signal_pending(current) ||
368 !timeo)
369 break;
392 370
393 if (!STATE_IS_OPEN(cf_sk)) { 371 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
394 /* Someone closed the link, report error. */ 372 release_sock(sk);
395 pr_debug("CAIF: %s(): remote end shutdown!\n", 373 timeo = schedule_timeout(timeo);
396 __func__); 374 lock_sock(sk);
397 ret = -EPIPE; 375 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
398 goto read_error;
399 }
400 } 376 }
401 377
402 /* The queue has its own lock. */ 378 finish_wait(sk_sleep(sk), &wait);
403 len = cfpkt_getlen(pkt); 379 release_sock(sk);
404 380 return timeo;
405 /* Check max length that can be copied. */ 381}
406 if (len <= buf_len)
407 pkt = cfpkt_dequeue(cf_sk->pktq);
408 else {
409 pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n",
410 __func__, (long) len, (long) buf_len);
411 if (sock->type == SOCK_SEQPACKET) {
412 ret = -EMSGSIZE;
413 goto read_error;
414 }
415 len = buf_len;
416 }
417 382
418 383
419 spin_lock(&cf_sk->read_queue_len_lock); 384/*
420 cf_sk->read_queue_len--; 385 * Copied from unix_stream_recvmsg, but removed credit checks,
421 read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW); 386 * changed locking calls, changed address handling.
422 spin_unlock(&cf_sk->read_queue_len_lock); 387 */
388static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
389 struct msghdr *msg, size_t size,
390 int flags)
391{
392 struct sock *sk = sock->sk;
393 int copied = 0;
394 int target;
395 int err = 0;
396 long timeo;
423 397
424 if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) { 398 err = -EOPNOTSUPP;
425 dbfs_atomic_inc(&cnt.num_rx_flow_on); 399 if (flags&MSG_OOB)
426 SET_RX_FLOW_ON(cf_sk); 400 goto out;
427 401
428 /* Send flow on. */ 402 msg->msg_namelen = 0;
429 pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
430 __func__, cf_sk->read_queue_len);
431 caif_assert(cf_sk->layer.dn);
432 caif_assert(cf_sk->layer.dn->ctrlcmd);
433 (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
434 CAIF_MODEMCMD_FLOW_ON_REQ);
435 403
436 caif_assert(cf_sk->read_queue_len >= 0); 404 /*
437 } 405 * Lock the socket to prevent queue disordering
406 * while sleeps in memcpy_tomsg
407 */
408 err = -EAGAIN;
409 if (sk->sk_state == CAIF_CONNECTING)
410 goto out;
438 411
439 skb = cfpkt_tonative(pkt); 412 caif_read_lock(sk);
440 result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); 413 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
441 skb_pull(skb, len); 414 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
442 415
443 if (result) { 416 do {
444 pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__); 417 int chunk;
445 cfpkt_destroy(pkt); 418 struct sk_buff *skb;
446 ret = -EFAULT;
447 goto read_error;
448 }
449 419
450 /* Free packet and remove from queue */ 420 lock_sock(sk);
451 if (skb->len == 0) 421 skb = skb_dequeue(&sk->sk_receive_queue);
452 skb_free_datagram(sk, skb); 422 caif_check_flow_release(sk);
453 423
454 /* Let the others in. */ 424 if (skb == NULL) {
455 release_sock(&cf_sk->sk); 425 if (copied >= target)
456 return len; 426 goto unlock;
427 /*
428 * POSIX 1003.1g mandates this order.
429 */
430 err = sock_error(sk);
431 if (err)
432 goto unlock;
433 err = -ECONNRESET;
434 if (sk->sk_shutdown & RCV_SHUTDOWN)
435 goto unlock;
457 436
458read_error: 437 err = -EPIPE;
459 release_sock(&cf_sk->sk); 438 if (sk->sk_state != CAIF_CONNECTED)
460read_error_no_unlock: 439 goto unlock;
461 return ret; 440 if (sock_flag(sk, SOCK_DEAD))
462} 441 goto unlock;
463 442
464/* Send a signal as a consequence of sendmsg, sendto or caif_sendmsg. */ 443 release_sock(sk);
465static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock,
466 struct msghdr *msg, size_t len)
467{
468 444
469 struct sock *sk = sock->sk; 445 err = -EAGAIN;
470 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 446 if (!timeo)
471 size_t payload_size = msg->msg_iov->iov_len; 447 break;
472 struct cfpkt *pkt = NULL;
473 struct caif_payload_info info;
474 unsigned char *txbuf;
475 ssize_t ret = -EIO;
476 int result;
477 struct sk_buff *skb;
478 caif_assert(msg->msg_iovlen == 1);
479 448
480 if (cf_sk == NULL) { 449 caif_read_unlock(sk);
481 pr_debug("CAIF: %s(): private_data not set!\n",
482 __func__);
483 ret = -EBADFD;
484 goto write_error_no_unlock;
485 }
486 450
487 if (unlikely(msg->msg_iov->iov_base == NULL)) { 451 timeo = caif_stream_data_wait(sk, timeo);
488 pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__);
489 ret = -EINVAL;
490 goto write_error_no_unlock;
491 }
492 452
493 if (payload_size > CAIF_MAX_PAYLOAD_SIZE) { 453 if (signal_pending(current)) {
494 pr_debug("CAIF: %s(): buffer too long\n", __func__); 454 err = sock_intr_errno(timeo);
495 if (sock->type == SOCK_SEQPACKET) { 455 goto out;
496 ret = -EINVAL; 456 }
497 goto write_error_no_unlock; 457 caif_read_lock(sk);
458 continue;
459unlock:
460 release_sock(sk);
461 break;
498 } 462 }
499 payload_size = CAIF_MAX_PAYLOAD_SIZE; 463 release_sock(sk);
500 } 464 chunk = min_t(unsigned int, skb->len, size);
465 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
466 skb_queue_head(&sk->sk_receive_queue, skb);
467 if (copied == 0)
468 copied = -EFAULT;
469 break;
470 }
471 copied += chunk;
472 size -= chunk;
501 473
502 /* I want to be alone on cf_sk (except status and queue) */ 474 /* Mark read part of skb as used */
503 lock_sock(&(cf_sk->sk)); 475 if (!(flags & MSG_PEEK)) {
476 skb_pull(skb, chunk);
504 477
505 caif_assert(cf_sk->pktq); 478 /* put the skb back if we didn't use it up. */
479 if (skb->len) {
480 skb_queue_head(&sk->sk_receive_queue, skb);
481 break;
482 }
483 kfree_skb(skb);
506 484
507 if (!STATE_IS_OPEN(cf_sk)) {
508 /* Socket is closed or closing */
509 if (!STATE_IS_PENDING(cf_sk)) {
510 pr_debug("CAIF: %s(): socket is closed (by remote)\n",
511 __func__);
512 ret = -EPIPE;
513 } else { 485 } else {
514 pr_debug("CAIF: %s(): socket is closing...\n", 486 /*
515 __func__); 487 * It is questionable, see note in unix_dgram_recvmsg.
516 ret = -EBADF; 488 */
517 } 489 /* put message back and return */
518 goto write_error; 490 skb_queue_head(&sk->sk_receive_queue, skb);
519 } 491 break;
520
521 /* Socket is open or opening */
522 if (STATE_IS_PENDING(cf_sk)) {
523 pr_debug("CAIF: %s(): socket is opening...\n", __func__);
524
525 if (msg->msg_flags & MSG_DONTWAIT) {
526 /* We can't block */
527 trace_printk("CAIF: %s():state pending:"
528 "state=MSG_DONTWAIT\n", __func__);
529 ret = -EAGAIN;
530 goto write_error;
531 } 492 }
532 /* Let readers in */ 493 } while (size);
533 release_sock(&cf_sk->sk); 494 caif_read_unlock(sk);
534
535 /*
536 * Blocking mode; state is pending and we need to wait
537 * for its conclusion.
538 */
539 result =
540 wait_event_interruptible(*cf_sk->sk.sk_sleep,
541 !STATE_IS_PENDING(cf_sk));
542 /* I want to be alone on cf_sk (except status and queue) */
543 lock_sock(&(cf_sk->sk));
544 495
545 if (result == -ERESTARTSYS) { 496out:
546 pr_debug("CAIF: %s(): wait_event_interruptible" 497 return copied ? : err;
547 " woken by a signal (1)", __func__); 498}
548 ret = -ERESTARTSYS;
549 goto write_error;
550 }
551 }
552 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
553 !STATE_IS_OPEN(cf_sk) ||
554 STATE_IS_PENDING(cf_sk)) {
555 499
556 pr_debug("CAIF: %s(): socket closed\n", 500/*
557 __func__); 501 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
558 ret = -ESHUTDOWN; 502 * CAIF flow-on and sock_writable.
559 goto write_error; 503 */
504static long caif_wait_for_flow_on(struct caifsock *cf_sk,
505 int wait_writeable, long timeo, int *err)
506{
507 struct sock *sk = &cf_sk->sk;
508 DEFINE_WAIT(wait);
509 for (;;) {
510 *err = 0;
511 if (tx_flow_is_on(cf_sk) &&
512 (!wait_writeable || sock_writeable(&cf_sk->sk)))
513 break;
514 *err = -ETIMEDOUT;
515 if (!timeo)
516 break;
517 *err = -ERESTARTSYS;
518 if (signal_pending(current))
519 break;
520 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
521 *err = -ECONNRESET;
522 if (sk->sk_shutdown & SHUTDOWN_MASK)
523 break;
524 *err = -sk->sk_err;
525 if (sk->sk_err)
526 break;
527 *err = -EPIPE;
528 if (cf_sk->sk.sk_state != CAIF_CONNECTED)
529 break;
530 timeo = schedule_timeout(timeo);
560 } 531 }
532 finish_wait(sk_sleep(sk), &wait);
533 return timeo;
534}
561 535
562 if (!TX_FLOW_IS_ON(cf_sk)) { 536/*
537 * Transmit a SKB. The device may temporarily request re-transmission
538 * by returning EAGAIN.
539 */
540static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
541 int noblock, long timeo)
542{
543 struct cfpkt *pkt;
544 int ret, loopcnt = 0;
563 545
564 /* Flow is off. Check non-block flag */ 546 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
565 if (msg->msg_flags & MSG_DONTWAIT) { 547 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
566 trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off", 548 do {
567 __func__);
568 ret = -EAGAIN;
569 goto write_error;
570 }
571 549
572 /* release lock before waiting */ 550 ret = -ETIMEDOUT;
573 release_sock(&cf_sk->sk);
574 551
575 /* Wait until flow is on or socket is closed */ 552 /* Slight paranoia, probably not needed. */
576 if (wait_event_interruptible(*cf_sk->sk.sk_sleep, 553 if (unlikely(loopcnt++ > 1000)) {
577 TX_FLOW_IS_ON(cf_sk) 554 pr_warning("CAIF: %s(): transmit retries failed,"
578 || !STATE_IS_OPEN(cf_sk) 555 " error = %d\n", __func__, ret);
579 || STATE_IS_REMOTE_SHUTDOWN(cf_sk) 556 break;
580 ) == -ERESTARTSYS) {
581 pr_debug("CAIF: %s():"
582 " wait_event_interruptible woken by a signal",
583 __func__);
584 ret = -ERESTARTSYS;
585 goto write_error_no_unlock;
586 } 557 }
587 558
588 /* I want to be alone on cf_sk (except status and queue) */ 559 if (cf_sk->layer.dn != NULL)
589 lock_sock(&(cf_sk->sk)); 560 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
590 561 if (likely(ret >= 0))
591 if (!STATE_IS_OPEN(cf_sk)) { 562 break;
592 /* someone closed the link, report error */ 563 /* if transmit return -EAGAIN, then retry */
593 pr_debug("CAIF: %s(): remote end shutdown!\n", 564 if (noblock && ret == -EAGAIN)
594 __func__); 565 break;
595 ret = -EPIPE; 566 timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
596 goto write_error; 567 if (signal_pending(current)) {
568 ret = sock_intr_errno(timeo);
569 break;
597 } 570 }
598 571 if (ret)
599 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { 572 break;
600 pr_debug("CAIF: %s(): " 573 if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
601 "received remote_shutdown indication\n", 574 sock_flag(&cf_sk->sk, SOCK_DEAD) ||
602 __func__); 575 (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
603 ret = -ESHUTDOWN; 576 ret = -EPIPE;
604 goto write_error; 577 cf_sk->sk.sk_err = EPIPE;
578 break;
605 } 579 }
606 } 580 } while (ret == -EAGAIN);
581 return ret;
582}
607 583
608 pkt = cfpkt_create(payload_size); 584/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
609 skb = (struct sk_buff *)pkt; 585static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
610 skb->destructor = skb_destructor; 586 struct msghdr *msg, size_t len)
611 skb->sk = sk; 587{
612 dbfs_atomic_inc(&cnt.skb_alloc); 588 struct sock *sk = sock->sk;
613 dbfs_atomic_inc(&cnt.skb_in_use); 589 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
614 if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) { 590 int buffer_size;
615 pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__); 591 int ret = 0;
616 cfpkt_destroy(pkt); 592 struct sk_buff *skb = NULL;
617 ret = -EINVAL; 593 int noblock;
618 goto write_error; 594 long timeo;
619 } 595 caif_assert(cf_sk);
596 ret = sock_error(sk);
597 if (ret)
598 goto err;
599
600 ret = -EOPNOTSUPP;
601 if (msg->msg_flags&MSG_OOB)
602 goto err;
603
604 ret = -EOPNOTSUPP;
605 if (msg->msg_namelen)
606 goto err;
607
608 ret = -EINVAL;
609 if (unlikely(msg->msg_iov->iov_base == NULL))
610 goto err;
611 noblock = msg->msg_flags & MSG_DONTWAIT;
612
613 buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
614
615 ret = -EMSGSIZE;
616 if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
617 goto err;
618
619 timeo = sock_sndtimeo(sk, noblock);
620 timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
621 1, timeo, &ret);
622
623 ret = -EPIPE;
624 if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
625 sock_flag(sk, SOCK_DEAD) ||
626 (sk->sk_shutdown & RCV_SHUTDOWN))
627 goto err;
628
629 ret = -ENOMEM;
630 skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
631 if (!skb)
632 goto err;
633 skb_reserve(skb, CAIF_NEEDED_HEADROOM);
634
635 ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
636
637 if (ret)
638 goto err;
639 ret = transmit_skb(skb, cf_sk, noblock, timeo);
640 if (ret < 0)
641 goto err;
642 return len;
643err:
644 kfree_skb(skb);
645 return ret;
646}
620 647
621 /* Copy data into buffer. */ 648/*
622 if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) { 649 * Copied from unix_stream_sendmsg and adapted to CAIF:
623 pr_debug("CAIF: %s(): copy_from_user returned non zero.\n", 650 * Changed removed permission handling and added waiting for flow on
624 __func__); 651 * and other minor adaptations.
625 cfpkt_destroy(pkt); 652 */
626 ret = -EINVAL; 653static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
627 goto write_error; 654 struct msghdr *msg, size_t len)
628 } 655{
629 memset(&info, 0, sizeof(info)); 656 struct sock *sk = sock->sk;
657 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
658 int err, size;
659 struct sk_buff *skb;
660 int sent = 0;
661 long timeo;
630 662
631 /* Send the packet down the stack. */ 663 err = -EOPNOTSUPP;
632 caif_assert(cf_sk->layer.dn);
633 caif_assert(cf_sk->layer.dn->transmit);
634 664
635 do { 665 if (unlikely(msg->msg_flags&MSG_OOB))
636 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); 666 goto out_err;
637 667
638 if (likely((ret >= 0) || (ret != -EAGAIN))) 668 if (unlikely(msg->msg_namelen))
639 break; 669 goto out_err;
640 670
641 /* EAGAIN - retry */ 671 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
642 if (msg->msg_flags & MSG_DONTWAIT) { 672 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
643 pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
644 " error = %ld\n", __func__, (long) ret);
645 ret = -EAGAIN;
646 goto write_error;
647 }
648 673
649 /* Let readers in */ 674 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
650 release_sock(&cf_sk->sk); 675 goto pipe_err;
651 676
652 /* Wait until flow is on or socket is closed */ 677 while (sent < len) {
653 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
654 TX_FLOW_IS_ON(cf_sk)
655 || !STATE_IS_OPEN(cf_sk)
656 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
657 ) == -ERESTARTSYS) {
658 pr_debug("CAIF: %s(): wait_event_interruptible"
659 " woken by a signal", __func__);
660 ret = -ERESTARTSYS;
661 goto write_error_no_unlock;
662 }
663 678
664 /* I want to be alone on cf_sk (except status and queue) */ 679 size = len-sent;
665 lock_sock(&(cf_sk->sk));
666 680
667 } while (ret == -EAGAIN); 681 if (size > CAIF_MAX_PAYLOAD_SIZE)
682 size = CAIF_MAX_PAYLOAD_SIZE;
668 683
669 if (ret < 0) { 684 /* If size is more than half of sndbuf, chop up message */
670 cfpkt_destroy(pkt); 685 if (size > ((sk->sk_sndbuf >> 1) - 64))
671 pr_debug("CAIF: %s(): transmit failed, error = %ld\n", 686 size = (sk->sk_sndbuf >> 1) - 64;
672 __func__, (long) ret);
673 687
674 goto write_error; 688 if (size > SKB_MAX_ALLOC)
675 } 689 size = SKB_MAX_ALLOC;
676 690
677 release_sock(&cf_sk->sk); 691 skb = sock_alloc_send_skb(sk,
678 return payload_size; 692 size + CAIF_NEEDED_HEADROOM
693 + CAIF_NEEDED_TAILROOM,
694 msg->msg_flags&MSG_DONTWAIT,
695 &err);
696 if (skb == NULL)
697 goto out_err;
679 698
680write_error: 699 skb_reserve(skb, CAIF_NEEDED_HEADROOM);
681 release_sock(&cf_sk->sk); 700 /*
682write_error_no_unlock: 701 * If you pass two values to the sock_alloc_send_skb
683 return ret; 702 * it tries to grab the large buffer with GFP_NOFS
684} 703 * (which can fail easily), and if it fails grab the
704 * fallback size buffer which is under a page and will
705 * succeed. [Alan]
706 */
707 size = min_t(int, size, skb_tailroom(skb));
685 708
686static unsigned int caif_poll(struct file *file, struct socket *sock, 709 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
687 poll_table *wait) 710 if (err) {
688{ 711 kfree_skb(skb);
689 struct sock *sk = sock->sk; 712 goto out_err;
690 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 713 }
691 u32 mask = 0; 714 err = transmit_skb(skb, cf_sk,
692 poll_wait(file, sk->sk_sleep, wait); 715 msg->msg_flags&MSG_DONTWAIT, timeo);
693 lock_sock(&(cf_sk->sk)); 716 if (err < 0) {
694 if (!STATE_IS_OPEN(cf_sk)) { 717 kfree_skb(skb);
695 if (!STATE_IS_PENDING(cf_sk)) 718 goto pipe_err;
696 mask |= POLLHUP; 719 }
697 } else { 720 sent += size;
698 if (cfpkt_qpeek(cf_sk->pktq) != NULL)
699 mask |= (POLLIN | POLLRDNORM);
700 if (TX_FLOW_IS_ON(cf_sk))
701 mask |= (POLLOUT | POLLWRNORM);
702 } 721 }
703 release_sock(&cf_sk->sk);
704 trace_printk("CAIF: %s(): poll mask=0x%04x\n",
705 __func__, mask);
706 return mask;
707}
708
709static void drain_queue(struct caifsock *cf_sk)
710{
711 struct cfpkt *pkt = NULL;
712
713 /* Empty the queue */
714 do {
715 /* The queue has its own lock */
716 if (!cf_sk->pktq)
717 break;
718
719 pkt = cfpkt_dequeue(cf_sk->pktq);
720 if (!pkt)
721 break;
722 pr_debug("CAIF: %s(): freeing packet from read queue\n",
723 __func__);
724 cfpkt_destroy(pkt);
725 722
726 } while (1); 723 return sent;
727 724
728 cf_sk->read_queue_len = 0; 725pipe_err:
726 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
727 send_sig(SIGPIPE, current, 0);
728 err = -EPIPE;
729out_err:
730 return sent ? : err;
729} 731}
730 732
731static int setsockopt(struct socket *sock, 733static int setsockopt(struct socket *sock,
@@ -736,19 +738,13 @@ static int setsockopt(struct socket *sock,
736 int prio, linksel; 738 int prio, linksel;
737 struct ifreq ifreq; 739 struct ifreq ifreq;
738 740
739 if (STATE_IS_OPEN(cf_sk)) { 741 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
740 pr_debug("CAIF: %s(): setsockopt "
741 "cannot be done on a connected socket\n",
742 __func__);
743 return -ENOPROTOOPT; 742 return -ENOPROTOOPT;
744 } 743
745 switch (opt) { 744 switch (opt) {
746 case CAIFSO_LINK_SELECT: 745 case CAIFSO_LINK_SELECT:
747 if (ol < sizeof(int)) { 746 if (ol < sizeof(int))
748 pr_debug("CAIF: %s(): setsockopt"
749 " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
750 return -EINVAL; 747 return -EINVAL;
751 }
752 if (lvl != SOL_CAIF) 748 if (lvl != SOL_CAIF)
753 goto bad_sol; 749 goto bad_sol;
754 if (copy_from_user(&linksel, ov, sizeof(int))) 750 if (copy_from_user(&linksel, ov, sizeof(int)))
@@ -761,28 +757,20 @@ static int setsockopt(struct socket *sock,
761 case SO_PRIORITY: 757 case SO_PRIORITY:
762 if (lvl != SOL_SOCKET) 758 if (lvl != SOL_SOCKET)
763 goto bad_sol; 759 goto bad_sol;
764 if (ol < sizeof(int)) { 760 if (ol < sizeof(int))
765 pr_debug("CAIF: %s(): setsockopt"
766 " SO_PRIORITY bad size\n", __func__);
767 return -EINVAL; 761 return -EINVAL;
768 }
769 if (copy_from_user(&prio, ov, sizeof(int))) 762 if (copy_from_user(&prio, ov, sizeof(int)))
770 return -EINVAL; 763 return -EINVAL;
771 lock_sock(&(cf_sk->sk)); 764 lock_sock(&(cf_sk->sk));
772 cf_sk->conn_req.priority = prio; 765 cf_sk->conn_req.priority = prio;
773 pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__,
774 cf_sk->conn_req.priority);
775 release_sock(&cf_sk->sk); 766 release_sock(&cf_sk->sk);
776 return 0; 767 return 0;
777 768
778 case SO_BINDTODEVICE: 769 case SO_BINDTODEVICE:
779 if (lvl != SOL_SOCKET) 770 if (lvl != SOL_SOCKET)
780 goto bad_sol; 771 goto bad_sol;
781 if (ol < sizeof(struct ifreq)) { 772 if (ol < sizeof(struct ifreq))
782 pr_debug("CAIF: %s(): setsockopt"
783 " SO_PRIORITY bad size\n", __func__);
784 return -EINVAL; 773 return -EINVAL;
785 }
786 if (copy_from_user(&ifreq, ov, sizeof(ifreq))) 774 if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
787 return -EFAULT; 775 return -EFAULT;
788 lock_sock(&(cf_sk->sk)); 776 lock_sock(&(cf_sk->sk));
@@ -798,359 +786,275 @@ static int setsockopt(struct socket *sock,
798 goto bad_sol; 786 goto bad_sol;
799 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) 787 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
800 return -ENOPROTOOPT; 788 return -ENOPROTOOPT;
801 if (ol > sizeof(cf_sk->conn_req.param.data))
802 goto req_param_bad_size;
803
804 lock_sock(&(cf_sk->sk)); 789 lock_sock(&(cf_sk->sk));
805 cf_sk->conn_req.param.size = ol; 790 cf_sk->conn_req.param.size = ol;
806 if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { 791 if (ol > sizeof(cf_sk->conn_req.param.data) ||
792 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
807 release_sock(&cf_sk->sk); 793 release_sock(&cf_sk->sk);
808req_param_bad_size:
809 pr_debug("CAIF: %s(): setsockopt"
810 " CAIFSO_CHANNEL_CONFIG bad size\n", __func__);
811 return -EINVAL; 794 return -EINVAL;
812 } 795 }
813
814 release_sock(&cf_sk->sk); 796 release_sock(&cf_sk->sk);
815 return 0; 797 return 0;
816 798
817 default: 799 default:
818 pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt); 800 return -ENOPROTOOPT;
819 return -EINVAL;
820 } 801 }
821 802
822 return 0; 803 return 0;
823bad_sol: 804bad_sol:
824 pr_debug("CAIF: %s(): setsockopt bad level\n", __func__);
825 return -ENOPROTOOPT; 805 return -ENOPROTOOPT;
826 806
827} 807}
828 808
829static int caif_connect(struct socket *sock, struct sockaddr *uservaddr, 809/*
830 int sockaddr_len, int flags) 810 * caif_connect() - Connect a CAIF Socket
811 * Copied and modified af_irda.c:irda_connect().
812 *
813 * Note : by consulting "errno", the user space caller may learn the cause
814 * of the failure. Most of them are visible in the function, others may come
815 * from subroutines called and are listed here :
816 * o -EAFNOSUPPORT: bad socket family or type.
817 * o -ESOCKTNOSUPPORT: bad socket type or protocol
818 * o -EINVAL: bad socket address, or CAIF link type
819 * o -ECONNREFUSED: remote end refused the connection.
820 * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
821 * o -EISCONN: already connected.
822 * o -ETIMEDOUT: Connection timed out (send timeout)
823 * o -ENODEV: No link layer to send request
824 * o -ECONNRESET: Received Shutdown indication or lost link layer
825 * o -ENOMEM: Out of memory
826 *
827 * State Strategy:
828 * o sk_state: holds the CAIF_* protocol state, it's updated by
829 * caif_ctrl_cb.
830 * o sock->state: holds the SS_* socket state and is updated by connect and
831 * disconnect.
832 */
833static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
834 int addr_len, int flags)
831{ 835{
832 struct caifsock *cf_sk = NULL;
833 int result = -1;
834 int mode = 0;
835 int ret = -EIO;
836 struct sock *sk = sock->sk; 836 struct sock *sk = sock->sk;
837 BUG_ON(sk == NULL); 837 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
838 838 long timeo;
839 cf_sk = container_of(sk, struct caifsock, sk); 839 int err;
840 840 lock_sock(sk);
841 trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n",
842 __func__, cf_sk,
843 STATE_IS_OPEN(cf_sk),
844 TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk));
845
846 841
847 if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM) 842 err = -EAFNOSUPPORT;
848 sock->state = SS_CONNECTING; 843 if (uaddr->sa_family != AF_CAIF)
849 else
850 goto out; 844 goto out;
851 845
852 /* I want to be alone on cf_sk (except status and queue) */ 846 err = -ESOCKTNOSUPPORT;
853 lock_sock(&(cf_sk->sk)); 847 if (unlikely(!(sk->sk_type == SOCK_STREAM &&
854 848 cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
855 if (sockaddr_len != sizeof(struct sockaddr_caif)) { 849 sk->sk_type != SOCK_SEQPACKET))
856 pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n", 850 goto out;
857 __func__, (long) sockaddr_len, 851 switch (sock->state) {
858 (long unsigned) sizeof(struct sockaddr_caif)); 852 case SS_UNCONNECTED:
859 ret = -EINVAL; 853 /* Normal case, a fresh connect */
860 goto open_error; 854 caif_assert(sk->sk_state == CAIF_DISCONNECTED);
855 break;
856 case SS_CONNECTING:
857 switch (sk->sk_state) {
858 case CAIF_CONNECTED:
859 sock->state = SS_CONNECTED;
860 err = -EISCONN;
861 goto out;
862 case CAIF_DISCONNECTED:
863 /* Reconnect allowed */
864 break;
865 case CAIF_CONNECTING:
866 err = -EALREADY;
867 if (flags & O_NONBLOCK)
868 goto out;
869 goto wait_connect;
870 }
871 break;
872 case SS_CONNECTED:
873 caif_assert(sk->sk_state == CAIF_CONNECTED ||
874 sk->sk_state == CAIF_DISCONNECTED);
875 if (sk->sk_shutdown & SHUTDOWN_MASK) {
876 /* Allow re-connect after SHUTDOWN_IND */
877 caif_disconnect_client(&cf_sk->layer);
878 break;
879 }
880 /* No reconnect on a seqpacket socket */
881 err = -EISCONN;
882 goto out;
883 case SS_DISCONNECTING:
884 case SS_FREE:
885 caif_assert(1); /*Should never happen */
886 break;
861 } 887 }
888 sk->sk_state = CAIF_DISCONNECTED;
889 sock->state = SS_UNCONNECTED;
890 sk_stream_kill_queues(&cf_sk->sk);
862 891
863 if (uservaddr->sa_family != AF_CAIF) { 892 err = -EINVAL;
864 pr_debug("CAIF: %s(): Bad address family (%d)\n", 893 if (addr_len != sizeof(struct sockaddr_caif) ||
865 __func__, uservaddr->sa_family); 894 !uaddr)
866 ret = -EAFNOSUPPORT; 895 goto out;
867 goto open_error;
868 }
869 896
870 memcpy(&cf_sk->conn_req.sockaddr, uservaddr, 897 memcpy(&cf_sk->conn_req.sockaddr, uaddr,
871 sizeof(struct sockaddr_caif)); 898 sizeof(struct sockaddr_caif));
872 899
873 dbfs_atomic_inc(&cnt.num_open); 900 /* Move to connecting socket, start sending Connect Requests */
874 mode = SKT_READ_FLAG | SKT_WRITE_FLAG; 901 sock->state = SS_CONNECTING;
875 902 sk->sk_state = CAIF_CONNECTING;
876 /* If socket is not open, make sure socket is in fully closed state */ 903
877 if (!STATE_IS_OPEN(cf_sk)) { 904 dbfs_atomic_inc(&cnt.num_connect_req);
878 /* Has link close response been received (if we ever sent it)?*/ 905 cf_sk->layer.receive = caif_sktrecv_cb;
879 if (STATE_IS_PENDING(cf_sk)) { 906 err = caif_connect_client(&cf_sk->conn_req,
880 /* 907 &cf_sk->layer);
881 * Still waiting for close response from remote. 908 if (err < 0) {
882 * If opened non-blocking, report "would block" 909 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
883 */ 910 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
884 if (flags & O_NONBLOCK) { 911 goto out;
885 pr_debug("CAIF: %s(): O_NONBLOCK"
886 " && close pending\n", __func__);
887 ret = -EAGAIN;
888 goto open_error;
889 }
890
891 pr_debug("CAIF: %s(): Wait for close response"
892 " from remote...\n", __func__);
893
894 release_sock(&cf_sk->sk);
895
896 /*
897 * Blocking mode; close is pending and we need to wait
898 * for its conclusion.
899 */
900 result =
901 wait_event_interruptible(*cf_sk->sk.sk_sleep,
902 !STATE_IS_PENDING(cf_sk));
903
904 lock_sock(&(cf_sk->sk));
905 if (result == -ERESTARTSYS) {
906 pr_debug("CAIF: %s(): wait_event_interruptible"
907 "woken by a signal (1)", __func__);
908 ret = -ERESTARTSYS;
909 goto open_error;
910 }
911 }
912 } 912 }
913 913
914 /* socket is now either closed, pending open or open */ 914 err = -EINPROGRESS;
915 if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { 915wait_connect:
916 /* Open */
917 pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)"
918 " check access f_flags = 0x%x file_mode = 0x%x\n",
919 __func__, cf_sk, mode, cf_sk->file_mode);
920
921 } else {
922 /* We are closed or pending open.
923 * If closed: send link setup
924 * If pending open: link setup already sent (we could have been
925 * interrupted by a signal last time)
926 */
927 if (!STATE_IS_OPEN(cf_sk)) {
928 /* First opening of file; connect lower layers: */
929 /* Drain queue (very unlikely) */
930 drain_queue(cf_sk);
931
932 cf_sk->layer.receive = caif_sktrecv_cb;
933 SET_STATE_OPEN(cf_sk);
934 SET_PENDING_ON(cf_sk);
935
936 /* Register this channel. */
937 result =
938 caif_connect_client(&cf_sk->conn_req,
939 &cf_sk->layer);
940 if (result < 0) {
941 pr_debug("CAIF: %s(): can't register channel\n",
942 __func__);
943 ret = -EIO;
944 SET_STATE_CLOSED(cf_sk);
945 SET_PENDING_OFF(cf_sk);
946 goto open_error;
947 }
948 dbfs_atomic_inc(&cnt.num_init);
949 }
950
951 /* If opened non-blocking, report "success".
952 */
953 if (flags & O_NONBLOCK) {
954 pr_debug("CAIF: %s(): O_NONBLOCK success\n",
955 __func__);
956 ret = -EINPROGRESS;
957 cf_sk->sk.sk_err = -EINPROGRESS;
958 goto open_error;
959 }
960
961 trace_printk("CAIF: %s(): Wait for connect response\n",
962 __func__);
963 916
964 /* release lock before waiting */ 917 if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
965 release_sock(&cf_sk->sk); 918 goto out;
966
967 result =
968 wait_event_interruptible(*cf_sk->sk.sk_sleep,
969 !STATE_IS_PENDING(cf_sk));
970
971 lock_sock(&(cf_sk->sk));
972
973 if (result == -ERESTARTSYS) {
974 pr_debug("CAIF: %s(): wait_event_interruptible"
975 "woken by a signal (2)", __func__);
976 ret = -ERESTARTSYS;
977 goto open_error;
978 }
979
980 if (!STATE_IS_OPEN(cf_sk)) {
981 /* Lower layers said "no" */
982 pr_debug("CAIF: %s(): Closed received\n", __func__);
983 ret = -EPIPE;
984 goto open_error;
985 }
986 919
987 trace_printk("CAIF: %s(): Connect received\n", __func__); 920 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
921
922 release_sock(sk);
923 err = wait_event_interruptible_timeout(*sk_sleep(sk),
924 sk->sk_state != CAIF_CONNECTING,
925 timeo);
926 lock_sock(sk);
927 if (err < 0)
928 goto out; /* -ERESTARTSYS */
929 if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
930 err = -ETIMEDOUT;
931 goto out;
988 } 932 }
989 /* Open is ok */
990 cf_sk->file_mode |= mode;
991 933
992 trace_printk("CAIF: %s(): Connected - file mode = %x\n", 934 if (sk->sk_state != CAIF_CONNECTED) {
993 __func__, cf_sk->file_mode); 935 sock->state = SS_UNCONNECTED;
994 936 err = sock_error(sk);
995 release_sock(&cf_sk->sk); 937 if (!err)
996 return 0; 938 err = -ECONNREFUSED;
997open_error: 939 goto out;
998 sock->state = SS_UNCONNECTED; 940 }
999 release_sock(&cf_sk->sk); 941 sock->state = SS_CONNECTED;
942 err = 0;
1000out: 943out:
1001 return ret; 944 release_sock(sk);
945 return err;
1002} 946}
1003 947
1004static int caif_shutdown(struct socket *sock, int how) 948
949/*
950 * caif_release() - Disconnect a CAIF Socket
951 * Copied and modified af_irda.c:irda_release().
952 */
953static int caif_release(struct socket *sock)
1005{ 954{
1006 struct caifsock *cf_sk = NULL;
1007 int result = 0;
1008 int tx_flow_state_was_on;
1009 struct sock *sk = sock->sk; 955 struct sock *sk = sock->sk;
956 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
957 int res = 0;
1010 958
1011 trace_printk("CAIF: %s(): enter\n", __func__); 959 if (!sk)
1012 pr_debug("f_flags=%x\n", sock->file->f_flags); 960 return 0;
1013
1014 if (how != SHUT_RDWR)
1015 return -EOPNOTSUPP;
1016
1017 cf_sk = container_of(sk, struct caifsock, sk);
1018 if (cf_sk == NULL) {
1019 pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__);
1020 return -EBADF;
1021 }
1022
1023 /* I want to be alone on cf_sk (except status queue) */
1024 lock_sock(&(cf_sk->sk));
1025 sock_hold(&cf_sk->sk);
1026
1027 /* IS_CLOSED have double meaning:
1028 * 1) Spontanous Remote Shutdown Request.
1029 * 2) Ack on a channel teardown(disconnect)
1030 * Must clear bit in case we previously received
1031 * remote shudown request.
1032 */
1033 if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
1034 SET_STATE_CLOSED(cf_sk);
1035 SET_PENDING_ON(cf_sk);
1036 tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk);
1037 SET_TX_FLOW_OFF(cf_sk);
1038
1039 /* Hold the socket until DEINIT_RSP is received */
1040 sock_hold(&cf_sk->sk);
1041 result = caif_disconnect_client(&cf_sk->layer);
1042
1043 if (result < 0) {
1044 pr_debug("CAIF: %s(): "
1045 "caif_disconnect_client() failed\n",
1046 __func__);
1047 SET_STATE_CLOSED(cf_sk);
1048 SET_PENDING_OFF(cf_sk);
1049 SET_TX_FLOW_OFF(cf_sk);
1050 release_sock(&cf_sk->sk);
1051 sock_put(&cf_sk->sk);
1052 return -EIO;
1053 }
1054 961
1055 } 962 set_tx_flow_off(cf_sk);
1056 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
1057 SET_PENDING_OFF(cf_sk);
1058 SET_REMOTE_SHUTDOWN_OFF(cf_sk);
1059 }
1060 963
1061 /* 964 /*
1062 * Socket is no longer in state pending close, 965 * Ensure that packets are not queued after this point in time.
1063 * and we can release the reference. 966 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
967 * this ensures no packets when sock is dead.
1064 */ 968 */
969 spin_lock(&sk->sk_receive_queue.lock);
970 sock_set_flag(sk, SOCK_DEAD);
971 spin_unlock(&sk->sk_receive_queue.lock);
972 sock->sk = NULL;
1065 973
1066 dbfs_atomic_inc(&cnt.num_close); 974 dbfs_atomic_inc(&cnt.num_disconnect);
1067 drain_queue(cf_sk);
1068 SET_RX_FLOW_ON(cf_sk);
1069 cf_sk->file_mode = 0;
1070 sock_put(&cf_sk->sk);
1071 release_sock(&cf_sk->sk);
1072 if (!result && (sock->file->f_flags & O_NONBLOCK)) {
1073 pr_debug("nonblocking shutdown returing -EAGAIN\n");
1074 return -EAGAIN;
1075 } else
1076 return result;
1077}
1078
1079static ssize_t caif_sock_no_sendpage(struct socket *sock,
1080 struct page *page,
1081 int offset, size_t size, int flags)
1082{
1083 return -EOPNOTSUPP;
1084}
1085
1086/* This function is called as part of close. */
1087static int caif_release(struct socket *sock)
1088{
1089 struct sock *sk = sock->sk;
1090 struct caifsock *cf_sk = NULL;
1091 int res;
1092 caif_assert(sk != NULL);
1093 cf_sk = container_of(sk, struct caifsock, sk);
1094 975
1095 if (cf_sk->debugfs_socket_dir != NULL) 976 if (cf_sk->debugfs_socket_dir != NULL)
1096 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); 977 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
1097 978
1098 res = caif_shutdown(sock, SHUT_RDWR);
1099 if (res && res != -EINPROGRESS)
1100 return res;
1101
1102 /*
1103 * FIXME: Shutdown should probably be possible to do async
1104 * without flushing queues, allowing reception of frames while
1105 * waiting for DEINIT_IND.
1106 * Release should always block, to allow secure decoupling of
1107 * CAIF stack.
1108 */
1109 if (!(sock->file->f_flags & O_NONBLOCK)) {
1110 res = wait_event_interruptible(*cf_sk->sk.sk_sleep,
1111 !STATE_IS_PENDING(cf_sk));
1112
1113 if (res == -ERESTARTSYS) {
1114 pr_debug("CAIF: %s(): wait_event_interruptible"
1115 "woken by a signal (1)", __func__);
1116 }
1117 }
1118 lock_sock(&(cf_sk->sk)); 979 lock_sock(&(cf_sk->sk));
980 sk->sk_state = CAIF_DISCONNECTED;
981 sk->sk_shutdown = SHUTDOWN_MASK;
1119 982
1120 sock->sk = NULL; 983 if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
984 cf_sk->sk.sk_socket->state == SS_CONNECTING)
985 res = caif_disconnect_client(&cf_sk->layer);
1121 986
1122 /* Detach the socket from its process context by making it orphan. */ 987 cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
1123 sock_orphan(sk); 988 wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
1124 989
1125 /* 990 sock_orphan(sk);
1126 * Setting SHUTDOWN_MASK means that both send and receive are shutdown 991 cf_sk->layer.dn = NULL;
1127 * for the socket. 992 sk_stream_kill_queues(&cf_sk->sk);
1128 */ 993 release_sock(sk);
1129 sk->sk_shutdown = SHUTDOWN_MASK; 994 sock_put(sk);
995 return res;
996}
1130 997
1131 /* 998/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
1132 * Set the socket state to closed, the TCP_CLOSE macro is used when 999static unsigned int caif_poll(struct file *file,
1133 * closing any socket. 1000 struct socket *sock, poll_table *wait)
1134 */ 1001{
1002 struct sock *sk = sock->sk;
1003 unsigned int mask;
1004 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
1135 1005
1136 /* Flush out this sockets receive queue. */ 1006 sock_poll_wait(file, sk_sleep(sk), wait);
1137 drain_queue(cf_sk); 1007 mask = 0;
1138 1008
1139 /* Finally release the socket. */ 1009 /* exceptional events? */
1140 SET_STATE_PENDING_DESTROY(cf_sk); 1010 if (sk->sk_err)
1011 mask |= POLLERR;
1012 if (sk->sk_shutdown == SHUTDOWN_MASK)
1013 mask |= POLLHUP;
1014 if (sk->sk_shutdown & RCV_SHUTDOWN)
1015 mask |= POLLRDHUP;
1141 1016
1142 release_sock(&cf_sk->sk); 1017 /* readable? */
1018 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1019 (sk->sk_shutdown & RCV_SHUTDOWN))
1020 mask |= POLLIN | POLLRDNORM;
1143 1021
1144 sock_put(sk); 1022 /* Connection-based need to check for termination and startup */
1023 if (sk->sk_state == CAIF_DISCONNECTED)
1024 mask |= POLLHUP;
1145 1025
1146 /* 1026 /*
1147 * The rest of the cleanup will be handled from the 1027 * we set writable also when the other side has shut down the
1148 * caif_sock_destructor 1028 * connection. This prevents stuck sockets.
1149 */ 1029 */
1150 return res; 1030 if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
1031 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1032
1033 return mask;
1151} 1034}
1152 1035
1153static const struct proto_ops caif_ops = { 1036static const struct proto_ops caif_seqpacket_ops = {
1037 .family = PF_CAIF,
1038 .owner = THIS_MODULE,
1039 .release = caif_release,
1040 .bind = sock_no_bind,
1041 .connect = caif_connect,
1042 .socketpair = sock_no_socketpair,
1043 .accept = sock_no_accept,
1044 .getname = sock_no_getname,
1045 .poll = caif_poll,
1046 .ioctl = sock_no_ioctl,
1047 .listen = sock_no_listen,
1048 .shutdown = sock_no_shutdown,
1049 .setsockopt = setsockopt,
1050 .getsockopt = sock_no_getsockopt,
1051 .sendmsg = caif_seqpkt_sendmsg,
1052 .recvmsg = caif_seqpkt_recvmsg,
1053 .mmap = sock_no_mmap,
1054 .sendpage = sock_no_sendpage,
1055};
1056
1057static const struct proto_ops caif_stream_ops = {
1154 .family = PF_CAIF, 1058 .family = PF_CAIF,
1155 .owner = THIS_MODULE, 1059 .owner = THIS_MODULE,
1156 .release = caif_release, 1060 .release = caif_release,
@@ -1162,73 +1066,62 @@ static const struct proto_ops caif_ops = {
1162 .poll = caif_poll, 1066 .poll = caif_poll,
1163 .ioctl = sock_no_ioctl, 1067 .ioctl = sock_no_ioctl,
1164 .listen = sock_no_listen, 1068 .listen = sock_no_listen,
1165 .shutdown = caif_shutdown, 1069 .shutdown = sock_no_shutdown,
1166 .setsockopt = setsockopt, 1070 .setsockopt = setsockopt,
1167 .getsockopt = sock_no_getsockopt, 1071 .getsockopt = sock_no_getsockopt,
1168 .sendmsg = caif_sendmsg, 1072 .sendmsg = caif_stream_sendmsg,
1169 .recvmsg = caif_recvmsg, 1073 .recvmsg = caif_stream_recvmsg,
1170 .mmap = sock_no_mmap, 1074 .mmap = sock_no_mmap,
1171 .sendpage = caif_sock_no_sendpage, 1075 .sendpage = sock_no_sendpage,
1172}; 1076};
1173 1077
1174/* This function is called when a socket is finally destroyed. */ 1078/* This function is called when a socket is finally destroyed. */
1175static void caif_sock_destructor(struct sock *sk) 1079static void caif_sock_destructor(struct sock *sk)
1176{ 1080{
1177 struct caifsock *cf_sk = NULL; 1081 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
1178 cf_sk = container_of(sk, struct caifsock, sk);
1179 /* Error checks. */
1180 caif_assert(!atomic_read(&sk->sk_wmem_alloc)); 1082 caif_assert(!atomic_read(&sk->sk_wmem_alloc));
1181 caif_assert(sk_unhashed(sk)); 1083 caif_assert(sk_unhashed(sk));
1182 caif_assert(!sk->sk_socket); 1084 caif_assert(!sk->sk_socket);
1183 if (!sock_flag(sk, SOCK_DEAD)) { 1085 if (!sock_flag(sk, SOCK_DEAD)) {
1184 pr_debug("CAIF: %s(): 0x%p", __func__, sk); 1086 pr_info("Attempt to release alive CAIF socket: %p\n", sk);
1185 return; 1087 return;
1186 } 1088 }
1187 1089 sk_stream_kill_queues(&cf_sk->sk);
1188 if (STATE_IS_OPEN(cf_sk)) { 1090 dbfs_atomic_dec(&cnt.caif_nr_socks);
1189 pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)"
1190 " file_mode = 0x%x\n", __func__,
1191 cf_sk, cf_sk->file_mode);
1192 return;
1193 }
1194 drain_queue(cf_sk);
1195 kfree(cf_sk->pktq);
1196
1197 trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n",
1198 __func__, cf_sk->name);
1199 atomic_dec(&caif_nr_socks);
1200} 1091}
1201 1092
1202static int caif_create(struct net *net, struct socket *sock, int protocol, 1093static int caif_create(struct net *net, struct socket *sock, int protocol,
1203 int kern) 1094 int kern)
1204{ 1095{
1205 struct sock *sk = NULL; 1096 struct sock *sk = NULL;
1206 struct caifsock *cf_sk = NULL; 1097 struct caifsock *cf_sk = NULL;
1207 int result = 0;
1208 static struct proto prot = {.name = "PF_CAIF", 1098 static struct proto prot = {.name = "PF_CAIF",
1209 .owner = THIS_MODULE, 1099 .owner = THIS_MODULE,
1210 .obj_size = sizeof(struct caifsock), 1100 .obj_size = sizeof(struct caifsock),
1211 }; 1101 };
1212 1102
1103 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
1104 return -EPERM;
1213 /* 1105 /*
1214 * The sock->type specifies the socket type to use. 1106 * The sock->type specifies the socket type to use.
1215 * in SEQPACKET mode packet boundaries are enforced. 1107 * The CAIF socket is a packet stream in the sense
1108 * that it is packet based. CAIF trusts the reliability
1109 * of the link, no resending is implemented.
1216 */ 1110 */
1217 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 1111 if (sock->type == SOCK_SEQPACKET)
1112 sock->ops = &caif_seqpacket_ops;
1113 else if (sock->type == SOCK_STREAM)
1114 sock->ops = &caif_stream_ops;
1115 else
1218 return -ESOCKTNOSUPPORT; 1116 return -ESOCKTNOSUPPORT;
1219 1117
1220 if (net != &init_net)
1221 return -EAFNOSUPPORT;
1222
1223 if (protocol < 0 || protocol >= CAIFPROTO_MAX) 1118 if (protocol < 0 || protocol >= CAIFPROTO_MAX)
1224 return -EPROTONOSUPPORT; 1119 return -EPROTONOSUPPORT;
1225 /* 1120 /*
1226 * Set the socket state to unconnected. The socket state is really 1121 * Set the socket state to unconnected. The socket state
1227 * not used at all in the net/core or socket.c but the 1122 * is really not used at all in the net/core or socket.c but the
1228 * initialization makes sure that sock->state is not uninitialized. 1123 * initialization makes sure that sock->state is not uninitialized.
1229 */ 1124 */
1230 sock->state = SS_UNCONNECTED;
1231
1232 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); 1125 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
1233 if (!sk) 1126 if (!sk)
1234 return -ENOMEM; 1127 return -ENOMEM;
@@ -1238,11 +1131,9 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1238 /* Store the protocol */ 1131 /* Store the protocol */
1239 sk->sk_protocol = (unsigned char) protocol; 1132 sk->sk_protocol = (unsigned char) protocol;
1240 1133
1241 spin_lock_init(&cf_sk->read_queue_len_lock); 1134 /* Sendbuf dictates the amount of outbound packets not yet sent */
1242 1135 sk->sk_sndbuf = CAIF_DEF_SNDBUF;
1243 /* Fill in some information concerning the misc socket. */ 1136 sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
1244 snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d",
1245 atomic_read(&caif_nr_socks));
1246 1137
1247 /* 1138 /*
1248 * Lock in order to try to stop someone from opening the socket 1139 * Lock in order to try to stop someone from opening the socket
@@ -1252,108 +1143,85 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1252 1143
1253 /* Initialize the nozero default sock structure data. */ 1144 /* Initialize the nozero default sock structure data. */
1254 sock_init_data(sock, sk); 1145 sock_init_data(sock, sk);
1255 sock->ops = &caif_ops;
1256 sk->sk_destruct = caif_sock_destructor; 1146 sk->sk_destruct = caif_sock_destructor;
1257 sk->sk_sndbuf = caif_sockbuf_size;
1258 sk->sk_rcvbuf = caif_sockbuf_size;
1259 1147
1260 cf_sk->pktq = cfpktq_create(); 1148 mutex_init(&cf_sk->readlock); /* single task reading lock */
1149 cf_sk->layer.ctrlcmd = caif_ctrl_cb;
1150 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
1151 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
1261 1152
1262 if (!cf_sk->pktq) { 1153 set_tx_flow_off(cf_sk);
1263 pr_err("CAIF: %s(): queue create failed.\n", __func__); 1154 set_rx_flow_on(cf_sk);
1264 result = -ENOMEM;
1265 release_sock(&cf_sk->sk);
1266 goto err_failed;
1267 }
1268 cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb;
1269 SET_STATE_CLOSED(cf_sk);
1270 SET_PENDING_OFF(cf_sk);
1271 SET_TX_FLOW_OFF(cf_sk);
1272 SET_RX_FLOW_ON(cf_sk);
1273 1155
1274 /* Set default options on configuration */ 1156 /* Set default options on configuration */
1275 cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; 1157 cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
1276 cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; 1158 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1277 cf_sk->conn_req.protocol = protocol; 1159 cf_sk->conn_req.protocol = protocol;
1278 /* Increase the number of sockets created. */ 1160 /* Increase the number of sockets created. */
1279 atomic_inc(&caif_nr_socks); 1161 dbfs_atomic_inc(&cnt.caif_nr_socks);
1162#ifdef CONFIG_DEBUG_FS
1280 if (!IS_ERR(debugfsdir)) { 1163 if (!IS_ERR(debugfsdir)) {
1164 /* Fill in some information concerning the misc socket. */
1165 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
1166 atomic_read(&cnt.caif_nr_socks));
1167
1281 cf_sk->debugfs_socket_dir = 1168 cf_sk->debugfs_socket_dir =
1282 debugfs_create_dir(cf_sk->name, debugfsdir); 1169 debugfs_create_dir(cf_sk->name, debugfsdir);
1283 debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR, 1170 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
1284 cf_sk->debugfs_socket_dir, &cf_sk->conn_state); 1171 cf_sk->debugfs_socket_dir,
1172 (u32 *) &cf_sk->sk.sk_state);
1285 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR, 1173 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1286 cf_sk->debugfs_socket_dir, &cf_sk->flow_state); 1174 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1287 debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR, 1175 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
1176 cf_sk->debugfs_socket_dir,
1177 (u32 *) &cf_sk->sk.sk_rmem_alloc);
1178 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
1288 cf_sk->debugfs_socket_dir, 1179 cf_sk->debugfs_socket_dir,
1289 (u32 *) &cf_sk->read_queue_len); 1180 (u32 *) &cf_sk->sk.sk_wmem_alloc);
1290 debugfs_create_u32("identity", S_IRUSR | S_IWUSR, 1181 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1291 cf_sk->debugfs_socket_dir, 1182 cf_sk->debugfs_socket_dir,
1292 (u32 *) &cf_sk->layer.id); 1183 (u32 *) &cf_sk->layer.id);
1293 } 1184 }
1185#endif
1294 release_sock(&cf_sk->sk); 1186 release_sock(&cf_sk->sk);
1295 return 0; 1187 return 0;
1296err_failed:
1297 sk_free(sk);
1298 return result;
1299} 1188}
1300 1189
1190
1301static struct net_proto_family caif_family_ops = { 1191static struct net_proto_family caif_family_ops = {
1302 .family = PF_CAIF, 1192 .family = PF_CAIF,
1303 .create = caif_create, 1193 .create = caif_create,
1304 .owner = THIS_MODULE, 1194 .owner = THIS_MODULE,
1305}; 1195};
1306 1196
1307static int af_caif_init(void) 1197int af_caif_init(void)
1308{ 1198{
1309 int err; 1199 int err = sock_register(&caif_family_ops);
1310 err = sock_register(&caif_family_ops);
1311
1312 if (!err) 1200 if (!err)
1313 return err; 1201 return err;
1314
1315 return 0; 1202 return 0;
1316} 1203}
1317 1204
1318static int __init caif_sktinit_module(void) 1205static int __init caif_sktinit_module(void)
1319{ 1206{
1320 int stat;
1321#ifdef CONFIG_DEBUG_FS 1207#ifdef CONFIG_DEBUG_FS
1322 debugfsdir = debugfs_create_dir("chnl_skt", NULL); 1208 debugfsdir = debugfs_create_dir("caif_sk", NULL);
1323 if (!IS_ERR(debugfsdir)) { 1209 if (!IS_ERR(debugfsdir)) {
1324 debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR,
1325 debugfsdir,
1326 (u32 *) &cnt.skb_in_use);
1327 debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR,
1328 debugfsdir,
1329 (u32 *) &cnt.skb_alloc);
1330 debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR,
1331 debugfsdir,
1332 (u32 *) &cnt.skb_free);
1333 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, 1210 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1334 debugfsdir, 1211 debugfsdir,
1335 (u32 *) &caif_nr_socks); 1212 (u32 *) &cnt.caif_nr_socks);
1336 debugfs_create_u32("num_open", S_IRUSR | S_IWUSR, 1213 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1337 debugfsdir, 1214 debugfsdir,
1338 (u32 *) &cnt.num_open); 1215 (u32 *) &cnt.num_connect_req);
1339 debugfs_create_u32("num_close", S_IRUSR | S_IWUSR, 1216 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
1340 debugfsdir, 1217 debugfsdir,
1341 (u32 *) &cnt.num_close); 1218 (u32 *) &cnt.num_connect_resp);
1342 debugfs_create_u32("num_init", S_IRUSR | S_IWUSR, 1219 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
1343 debugfsdir, 1220 debugfsdir,
1344 (u32 *) &cnt.num_init); 1221 (u32 *) &cnt.num_connect_fail_resp);
1345 debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR, 1222 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
1346 debugfsdir, 1223 debugfsdir,
1347 (u32 *) &cnt.num_init_resp); 1224 (u32 *) &cnt.num_disconnect);
1348 debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
1349 debugfsdir,
1350 (u32 *) &cnt.num_init_fail_resp);
1351 debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
1352 debugfsdir,
1353 (u32 *) &cnt.num_deinit);
1354 debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
1355 debugfsdir,
1356 (u32 *) &cnt.num_deinit_resp);
1357 debugfs_create_u32("num_remote_shutdown_ind", 1225 debugfs_create_u32("num_remote_shutdown_ind",
1358 S_IRUSR | S_IWUSR, debugfsdir, 1226 S_IRUSR | S_IWUSR, debugfsdir,
1359 (u32 *) &cnt.num_remote_shutdown_ind); 1227 (u32 *) &cnt.num_remote_shutdown_ind);
@@ -1371,13 +1239,7 @@ static int __init caif_sktinit_module(void)
1371 (u32 *) &cnt.num_rx_flow_on); 1239 (u32 *) &cnt.num_rx_flow_on);
1372 } 1240 }
1373#endif 1241#endif
1374 stat = af_caif_init(); 1242 return af_caif_init();
1375 if (stat) {
1376 pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.",
1377 __func__);
1378 return stat;
1379 }
1380 return 0;
1381} 1243}
1382 1244
1383static void __exit caif_sktexit_module(void) 1245static void __exit caif_sktexit_module(void)
@@ -1386,6 +1248,5 @@ static void __exit caif_sktexit_module(void)
1386 if (debugfsdir != NULL) 1248 if (debugfsdir != NULL)
1387 debugfs_remove_recursive(debugfsdir); 1249 debugfs_remove_recursive(debugfsdir);
1388} 1250}
1389
1390module_init(caif_sktinit_module); 1251module_init(caif_sktinit_module);
1391module_exit(caif_sktexit_module); 1252module_exit(caif_sktexit_module);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index c873e3d4387c..471c62939fad 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -51,12 +51,11 @@ struct cfcnfg {
51 struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; 51 struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
52}; 52};
53 53
54static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, 54static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
55 enum cfctrl_srv serv, u8 phyid, 55 enum cfctrl_srv serv, u8 phyid,
56 struct cflayer *adapt_layer); 56 struct cflayer *adapt_layer);
57static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, 57static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
58 struct cflayer *client_layer); 58static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
59static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
60 struct cflayer *adapt_layer); 59 struct cflayer *adapt_layer);
61static void cfctrl_resp_func(void); 60static void cfctrl_resp_func(void);
62static void cfctrl_enum_resp(void); 61static void cfctrl_enum_resp(void);
@@ -82,13 +81,13 @@ struct cfcnfg *cfcnfg_create(void)
82 resp = cfctrl_get_respfuncs(this->ctrl); 81 resp = cfctrl_get_respfuncs(this->ctrl);
83 resp->enum_rsp = cfctrl_enum_resp; 82 resp->enum_rsp = cfctrl_enum_resp;
84 resp->linkerror_ind = cfctrl_resp_func; 83 resp->linkerror_ind = cfctrl_resp_func;
85 resp->linkdestroy_rsp = cncfg_linkdestroy_rsp; 84 resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
86 resp->sleep_rsp = cfctrl_resp_func; 85 resp->sleep_rsp = cfctrl_resp_func;
87 resp->wake_rsp = cfctrl_resp_func; 86 resp->wake_rsp = cfctrl_resp_func;
88 resp->restart_rsp = cfctrl_resp_func; 87 resp->restart_rsp = cfctrl_resp_func;
89 resp->radioset_rsp = cfctrl_resp_func; 88 resp->radioset_rsp = cfctrl_resp_func;
90 resp->linksetup_rsp = cncfg_linkup_rsp; 89 resp->linksetup_rsp = cfcnfg_linkup_rsp;
91 resp->reject_rsp = cncfg_reject_rsp; 90 resp->reject_rsp = cfcnfg_reject_rsp;
92 91
93 this->last_phyid = 1; 92 this->last_phyid = 1;
94 93
@@ -175,142 +174,82 @@ int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
175 return 0; 174 return 0;
176} 175}
177 176
178/* 177int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
179 * NOTE: What happens on destroy failure:
180 * 1a) No response - Too early
181 * This will not happen because enumerate has already
182 * completed.
183 * 1b) No response - FATAL
184 * Not handled, but this should be a CAIF PROTOCOL ERROR
185 * Modem error, response is really expected - this
186 * case is not really handled.
187 * 2) O/E-bit indicate error
188 * Ignored - this link is destroyed anyway.
189 * 3) Not able to match on request
190 * Not handled, but this should be a CAIF PROTOCOL ERROR
191 * 4) Link-Error - (no response)
192 * Not handled, but this should be a CAIF PROTOCOL ERROR
193 */
194
195int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
196{ 178{
197 u8 channel_id = 0; 179 u8 channel_id = 0;
198 int ret = 0; 180 int ret = 0;
181 struct cflayer *servl = NULL;
199 struct cfcnfg_phyinfo *phyinfo = NULL; 182 struct cfcnfg_phyinfo *phyinfo = NULL;
200 u8 phyid = 0; 183 u8 phyid = 0;
201
202 caif_assert(adap_layer != NULL); 184 caif_assert(adap_layer != NULL);
203 channel_id = adap_layer->id; 185 channel_id = adap_layer->id;
204 if (channel_id == 0) { 186 if (adap_layer->dn == NULL || channel_id == 0) {
205 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); 187 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
206 ret = -ENOTCONN; 188 ret = -ENOTCONN;
207 goto end; 189 goto end;
208 } 190 }
209 191 servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
210 if (adap_layer->dn == NULL) { 192 if (servl == NULL)
211 pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__);
212 ret = -ENODEV;
213 goto end;
214 }
215
216 if (adap_layer->dn != NULL)
217 phyid = cfsrvl_getphyid(adap_layer->dn);
218
219 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
220 if (phyinfo == NULL) {
221 pr_warning("CAIF: %s(): No interface to send disconnect to\n",
222 __func__);
223 ret = -ENODEV;
224 goto end; 193 goto end;
225 } 194 layer_set_up(servl, NULL);
226 195 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
227 if (phyinfo->id != phyid 196 if (servl == NULL) {
228 || phyinfo->phy_layer->id != phyid 197 pr_err("CAIF: %s(): PROTOCOL ERROR "
229 || phyinfo->frm_layer->id != phyid) { 198 "- Error removing service_layer Channel_Id(%d)",
230 199 __func__, channel_id);
231 pr_err("CAIF: %s(): Inconsistency in phy registration\n",
232 __func__);
233 ret = -EINVAL; 200 ret = -EINVAL;
234 goto end; 201 goto end;
235 } 202 }
203 caif_assert(channel_id == servl->id);
204 if (adap_layer->dn != NULL) {
205 phyid = cfsrvl_getphyid(adap_layer->dn);
236 206
237 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); 207 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
238 208 if (phyinfo == NULL) {
239end: 209 pr_warning("CAIF: %s(): "
210 "No interface to send disconnect to\n",
211 __func__);
212 ret = -ENODEV;
213 goto end;
214 }
215 if (phyinfo->id != phyid ||
216 phyinfo->phy_layer->id != phyid ||
217 phyinfo->frm_layer->id != phyid) {
218 pr_err("CAIF: %s(): "
219 "Inconsistency in phy registration\n",
220 __func__);
221 ret = -EINVAL;
222 goto end;
223 }
224 }
240 if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 && 225 if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
241 phyinfo->phy_layer != NULL && 226 phyinfo->phy_layer != NULL &&
242 phyinfo->phy_layer->modemcmd != NULL) { 227 phyinfo->phy_layer->modemcmd != NULL) {
243 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, 228 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
244 _CAIF_MODEMCMD_PHYIF_USELESS); 229 _CAIF_MODEMCMD_PHYIF_USELESS);
245 } 230 }
231end:
232 cfsrvl_put(servl);
233 cfctrl_cancel_req(cnfg->ctrl, adap_layer);
234 if (adap_layer->ctrlcmd != NULL)
235 adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
246 return ret; 236 return ret;
247 237
248} 238}
249EXPORT_SYMBOL(cfcnfg_del_adapt_layer); 239EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
250 240
251static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, 241void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
252 struct cflayer *client_layer)
253{ 242{
254 struct cfcnfg *cnfg = container_obj(layer); 243 if (adap_layer->dn)
255 struct cflayer *servl; 244 cfsrvl_put(adap_layer->dn);
256
257 /*
258 * 1) Remove service from the MUX layer. The MUX must
259 * guarante that no more payload sent "upwards" (receive)
260 */
261 servl = cfmuxl_remove_uplayer(cnfg->mux, linkid);
262
263 if (servl == NULL) {
264 pr_err("CAIF: %s(): PROTOCOL ERROR "
265 "- Error removing service_layer Linkid(%d)",
266 __func__, linkid);
267 return;
268 }
269 caif_assert(linkid == servl->id);
270
271 if (servl != client_layer && servl->up != client_layer) {
272 pr_err("CAIF: %s(): Error removing service_layer "
273 "Linkid(%d) %p %p",
274 __func__, linkid, (void *) servl,
275 (void *) client_layer);
276 return;
277 }
278
279 /*
280 * 2) DEINIT_RSP must guarantee that no more packets are transmitted
281 * from client (adap_layer) when it returns.
282 */
283
284 if (servl->ctrlcmd == NULL) {
285 pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__);
286 return;
287 }
288
289 servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0);
290
291 /* 3) It is now safe to destroy the service layer. */
292 cfservl_destroy(servl);
293} 245}
246EXPORT_SYMBOL(cfcnfg_release_adap_layer);
294 247
295/* 248static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
296 * NOTE: What happens on linksetup failure: 249{
297 * 1a) No response - Too early 250}
298 * This will not happen because enumerate is secured
299 * before using interface.
300 * 1b) No response - FATAL
301 * Not handled, but this should be a CAIF PROTOCOL ERROR
302 * Modem error, response is really expected - this case is
303 * not really handled.
304 * 2) O/E-bit indicate error
305 * Handled in cnfg_reject_rsp
306 * 3) Not able to match on request
307 * Not handled, but this should be a CAIF PROTOCOL ERROR
308 * 4) Link-Error - (no response)
309 * Not handled, but this should be a CAIF PROTOCOL ERROR
310 */
311 251
312int 252int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
313cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
314 struct cfctrl_link_param *param, 253 struct cfctrl_link_param *param,
315 struct cflayer *adap_layer) 254 struct cflayer *adap_layer)
316{ 255{
@@ -340,12 +279,11 @@ cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
340 param->phyid); 279 param->phyid);
341 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ 280 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
342 cfctrl_enum_req(cnfg->ctrl, param->phyid); 281 cfctrl_enum_req(cnfg->ctrl, param->phyid);
343 cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); 282 return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
344 return 0;
345} 283}
346EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); 284EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
347 285
348static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, 286static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
349 struct cflayer *adapt_layer) 287 struct cflayer *adapt_layer)
350{ 288{
351 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) 289 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
@@ -354,15 +292,17 @@ static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
354} 292}
355 293
356static void 294static void
357cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv, 295cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
358 u8 phyid, struct cflayer *adapt_layer) 296 u8 phyid, struct cflayer *adapt_layer)
359{ 297{
360 struct cfcnfg *cnfg = container_obj(layer); 298 struct cfcnfg *cnfg = container_obj(layer);
361 struct cflayer *servicel = NULL; 299 struct cflayer *servicel = NULL;
362 struct cfcnfg_phyinfo *phyinfo; 300 struct cfcnfg_phyinfo *phyinfo;
363 if (adapt_layer == NULL) { 301 if (adapt_layer == NULL) {
364 pr_err("CAIF: %s(): PROTOCOL ERROR " 302 pr_debug("CAIF: %s(): link setup response "
365 "- LinkUp Request/Response did not match\n", __func__); 303 "but no client exist, send linkdown back\n",
304 __func__);
305 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
366 return; 306 return;
367 } 307 }
368 308
@@ -383,26 +323,26 @@ cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv,
383 _CAIF_MODEMCMD_PHYIF_USEFULL); 323 _CAIF_MODEMCMD_PHYIF_USEFULL);
384 324
385 } 325 }
386 adapt_layer->id = linkid; 326 adapt_layer->id = channel_id;
387 327
388 switch (serv) { 328 switch (serv) {
389 case CFCTRL_SRV_VEI: 329 case CFCTRL_SRV_VEI:
390 servicel = cfvei_create(linkid, &phyinfo->dev_info); 330 servicel = cfvei_create(channel_id, &phyinfo->dev_info);
391 break; 331 break;
392 case CFCTRL_SRV_DATAGRAM: 332 case CFCTRL_SRV_DATAGRAM:
393 servicel = cfdgml_create(linkid, &phyinfo->dev_info); 333 servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
394 break; 334 break;
395 case CFCTRL_SRV_RFM: 335 case CFCTRL_SRV_RFM:
396 servicel = cfrfml_create(linkid, &phyinfo->dev_info); 336 servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
397 break; 337 break;
398 case CFCTRL_SRV_UTIL: 338 case CFCTRL_SRV_UTIL:
399 servicel = cfutill_create(linkid, &phyinfo->dev_info); 339 servicel = cfutill_create(channel_id, &phyinfo->dev_info);
400 break; 340 break;
401 case CFCTRL_SRV_VIDEO: 341 case CFCTRL_SRV_VIDEO:
402 servicel = cfvidl_create(linkid, &phyinfo->dev_info); 342 servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
403 break; 343 break;
404 case CFCTRL_SRV_DBG: 344 case CFCTRL_SRV_DBG:
405 servicel = cfdbgl_create(linkid, &phyinfo->dev_info); 345 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
406 break; 346 break;
407 default: 347 default:
408 pr_err("CAIF: %s(): Protocol error. " 348 pr_err("CAIF: %s(): Protocol error. "
@@ -415,9 +355,10 @@ cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv,
415 return; 355 return;
416 } 356 }
417 layer_set_dn(servicel, cnfg->mux); 357 layer_set_dn(servicel, cnfg->mux);
418 cfmuxl_set_uplayer(cnfg->mux, servicel, linkid); 358 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
419 layer_set_up(servicel, adapt_layer); 359 layer_set_up(servicel, adapt_layer);
420 layer_set_dn(adapt_layer, servicel); 360 layer_set_dn(adapt_layer, servicel);
361 cfsrvl_get(servicel);
421 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); 362 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
422} 363}
423 364
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 11f80140f3cb..a521d32cfe56 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -32,6 +32,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
32 32
33struct cflayer *cfctrl_create(void) 33struct cflayer *cfctrl_create(void)
34{ 34{
35 struct dev_info dev_info;
35 struct cfctrl *this = 36 struct cfctrl *this =
36 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 37 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
37 if (!this) { 38 if (!this) {
@@ -39,12 +40,13 @@ struct cflayer *cfctrl_create(void)
39 return NULL; 40 return NULL;
40 } 41 }
41 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 42 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
43 memset(&dev_info, 0, sizeof(dev_info));
44 dev_info.id = 0xff;
42 memset(this, 0, sizeof(*this)); 45 memset(this, 0, sizeof(*this));
46 cfsrvl_init(&this->serv, 0, &dev_info);
43 spin_lock_init(&this->info_list_lock); 47 spin_lock_init(&this->info_list_lock);
44 atomic_set(&this->req_seq_no, 1); 48 atomic_set(&this->req_seq_no, 1);
45 atomic_set(&this->rsp_seq_no, 1); 49 atomic_set(&this->rsp_seq_no, 1);
46 this->serv.dev_info.id = 0xff;
47 this->serv.layer.id = 0;
48 this->serv.layer.receive = cfctrl_recv; 50 this->serv.layer.receive = cfctrl_recv;
49 sprintf(this->serv.layer.name, "ctrl"); 51 sprintf(this->serv.layer.name, "ctrl");
50 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 52 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
@@ -127,20 +129,6 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
127 spin_unlock(&ctrl->info_list_lock); 129 spin_unlock(&ctrl->info_list_lock);
128} 130}
129 131
130static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd,
131 u8 linkid, struct cflayer *user_layer)
132{
133 struct cfctrl_request_info *req = kmalloc(sizeof(*req), GFP_KERNEL);
134 if (!req) {
135 pr_warning("CAIF: %s(): Out of memory\n", __func__);
136 return;
137 }
138 req->client_layer = user_layer;
139 req->cmd = cmd;
140 req->channel_id = linkid;
141 cfctrl_insert_req(ctrl, req);
142}
143
144/* Compare and remove request */ 132/* Compare and remove request */
145struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 133struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
146 struct cfctrl_request_info *req) 134 struct cfctrl_request_info *req)
@@ -234,7 +222,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
234 } 222 }
235} 223}
236 224
237void cfctrl_linkup_request(struct cflayer *layer, 225int cfctrl_linkup_request(struct cflayer *layer,
238 struct cfctrl_link_param *param, 226 struct cfctrl_link_param *param,
239 struct cflayer *user_layer) 227 struct cflayer *user_layer)
240{ 228{
@@ -248,7 +236,7 @@ void cfctrl_linkup_request(struct cflayer *layer,
248 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 236 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
249 if (!pkt) { 237 if (!pkt) {
250 pr_warning("CAIF: %s(): Out of memory\n", __func__); 238 pr_warning("CAIF: %s(): Out of memory\n", __func__);
251 return; 239 return -ENOMEM;
252 } 240 }
253 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 241 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
254 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); 242 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
@@ -294,11 +282,12 @@ void cfctrl_linkup_request(struct cflayer *layer,
294 default: 282 default:
295 pr_warning("CAIF: %s():Request setup of bad link type = %d\n", 283 pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
296 __func__, param->linktype); 284 __func__, param->linktype);
285 return -EINVAL;
297 } 286 }
298 req = kmalloc(sizeof(*req), GFP_KERNEL); 287 req = kmalloc(sizeof(*req), GFP_KERNEL);
299 if (!req) { 288 if (!req) {
300 pr_warning("CAIF: %s(): Out of memory\n", __func__); 289 pr_warning("CAIF: %s(): Out of memory\n", __func__);
301 return; 290 return -ENOMEM;
302 } 291 }
303 memset(req, 0, sizeof(*req)); 292 memset(req, 0, sizeof(*req));
304 req->client_layer = user_layer; 293 req->client_layer = user_layer;
@@ -306,6 +295,11 @@ void cfctrl_linkup_request(struct cflayer *layer,
306 req->param = *param; 295 req->param = *param;
307 cfctrl_insert_req(cfctrl, req); 296 cfctrl_insert_req(cfctrl, req);
308 init_info(cfpkt_info(pkt), cfctrl); 297 init_info(cfpkt_info(pkt), cfctrl);
298 /*
299 * NOTE:Always send linkup and linkdown request on the same
300 * device as the payload. Otherwise old queued up payload
301 * might arrive with the newly allocated channel ID.
302 */
309 cfpkt_info(pkt)->dev_info->id = param->phyid; 303 cfpkt_info(pkt)->dev_info->id = param->phyid;
310 ret = 304 ret =
311 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 305 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
@@ -313,7 +307,9 @@ void cfctrl_linkup_request(struct cflayer *layer,
313 pr_err("CAIF: %s(): Could not transmit linksetup request\n", 307 pr_err("CAIF: %s(): Could not transmit linksetup request\n",
314 __func__); 308 __func__);
315 cfpkt_destroy(pkt); 309 cfpkt_destroy(pkt);
310 return -ENODEV;
316 } 311 }
312 return 0;
317} 313}
318 314
319int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, 315int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
@@ -326,7 +322,6 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
326 pr_warning("CAIF: %s(): Out of memory\n", __func__); 322 pr_warning("CAIF: %s(): Out of memory\n", __func__);
327 return -ENOMEM; 323 return -ENOMEM;
328 } 324 }
329 cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client);
330 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 325 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
331 cfpkt_addbdy(pkt, channelid); 326 cfpkt_addbdy(pkt, channelid);
332 init_info(cfpkt_info(pkt), cfctrl); 327 init_info(cfpkt_info(pkt), cfctrl);
@@ -392,6 +387,38 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
392} 387}
393 388
394 389
390void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
391{
392 struct cfctrl_request_info *p, *req;
393 struct cfctrl *ctrl = container_obj(layr);
394 spin_lock(&ctrl->info_list_lock);
395
396 if (ctrl->first_req == NULL) {
397 spin_unlock(&ctrl->info_list_lock);
398 return;
399 }
400
401 if (ctrl->first_req->client_layer == adap_layer) {
402
403 req = ctrl->first_req;
404 ctrl->first_req = ctrl->first_req->next;
405 kfree(req);
406 }
407
408 p = ctrl->first_req;
409 while (p != NULL && p->next != NULL) {
410 if (p->next->client_layer == adap_layer) {
411
412 req = p->next;
413 p->next = p->next->next;
414 kfree(p->next);
415 }
416 p = p->next;
417 }
418
419 spin_unlock(&ctrl->info_list_lock);
420}
421
395static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) 422static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
396{ 423{
397 u8 cmdrsp; 424 u8 cmdrsp;
@@ -409,11 +436,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
409 cmd = cmdrsp & CFCTRL_CMD_MASK; 436 cmd = cmdrsp & CFCTRL_CMD_MASK;
410 if (cmd != CFCTRL_CMD_LINK_ERR 437 if (cmd != CFCTRL_CMD_LINK_ERR
411 && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { 438 && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
412 if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) { 439 if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
413 pr_info("CAIF: %s() CAIF Protocol error:" 440 cmdrsp |= CFCTRL_ERR_BIT;
414 "Response bit not set\n", __func__);
415 goto error;
416 }
417 } 441 }
418 442
419 switch (cmd) { 443 switch (cmd) {
@@ -451,12 +475,16 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
451 switch (serv) { 475 switch (serv) {
452 case CFCTRL_SRV_VEI: 476 case CFCTRL_SRV_VEI:
453 case CFCTRL_SRV_DBG: 477 case CFCTRL_SRV_DBG:
478 if (CFCTRL_ERR_BIT & cmdrsp)
479 break;
454 /* Link ID */ 480 /* Link ID */
455 cfpkt_extr_head(pkt, &linkid, 1); 481 cfpkt_extr_head(pkt, &linkid, 1);
456 break; 482 break;
457 case CFCTRL_SRV_VIDEO: 483 case CFCTRL_SRV_VIDEO:
458 cfpkt_extr_head(pkt, &tmp, 1); 484 cfpkt_extr_head(pkt, &tmp, 1);
459 linkparam.u.video.connid = tmp; 485 linkparam.u.video.connid = tmp;
486 if (CFCTRL_ERR_BIT & cmdrsp)
487 break;
460 /* Link ID */ 488 /* Link ID */
461 cfpkt_extr_head(pkt, &linkid, 1); 489 cfpkt_extr_head(pkt, &linkid, 1);
462 break; 490 break;
@@ -465,6 +493,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
465 cfpkt_extr_head(pkt, &tmp32, 4); 493 cfpkt_extr_head(pkt, &tmp32, 4);
466 linkparam.u.datagram.connid = 494 linkparam.u.datagram.connid =
467 le32_to_cpu(tmp32); 495 le32_to_cpu(tmp32);
496 if (CFCTRL_ERR_BIT & cmdrsp)
497 break;
468 /* Link ID */ 498 /* Link ID */
469 cfpkt_extr_head(pkt, &linkid, 1); 499 cfpkt_extr_head(pkt, &linkid, 1);
470 break; 500 break;
@@ -483,6 +513,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
483 *cp++ = tmp; 513 *cp++ = tmp;
484 *cp = '\0'; 514 *cp = '\0';
485 515
516 if (CFCTRL_ERR_BIT & cmdrsp)
517 break;
486 /* Link ID */ 518 /* Link ID */
487 cfpkt_extr_head(pkt, &linkid, 1); 519 cfpkt_extr_head(pkt, &linkid, 1);
488 520
@@ -519,6 +551,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
519 cfpkt_extr_head(pkt, &tmp, 1); 551 cfpkt_extr_head(pkt, &tmp, 1);
520 *cp++ = tmp; 552 *cp++ = tmp;
521 } 553 }
554 if (CFCTRL_ERR_BIT & cmdrsp)
555 break;
522 /* Link ID */ 556 /* Link ID */
523 cfpkt_extr_head(pkt, &linkid, 1); 557 cfpkt_extr_head(pkt, &linkid, 1);
524 /* Length */ 558 /* Length */
@@ -560,13 +594,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
560 break; 594 break;
561 case CFCTRL_CMD_LINK_DESTROY: 595 case CFCTRL_CMD_LINK_DESTROY:
562 cfpkt_extr_head(pkt, &linkid, 1); 596 cfpkt_extr_head(pkt, &linkid, 1);
563 rsp.cmd = cmd; 597 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
564 rsp.channel_id = linkid;
565 req = cfctrl_remove_req(cfctrl, &rsp);
566 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid,
567 req ? req->client_layer : NULL);
568 if (req != NULL)
569 kfree(req);
570 break; 598 break;
571 case CFCTRL_CMD_LINK_ERR: 599 case CFCTRL_CMD_LINK_ERR:
572 pr_err("CAIF: %s(): Frame Error Indication received\n", 600 pr_err("CAIF: %s(): Frame Error Indication received\n",
@@ -608,7 +636,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
608 case CAIF_CTRLCMD_FLOW_OFF_IND: 636 case CAIF_CTRLCMD_FLOW_OFF_IND:
609 spin_lock(&this->info_list_lock); 637 spin_lock(&this->info_list_lock);
610 if (this->first_req != NULL) { 638 if (this->first_req != NULL) {
611 pr_warning("CAIF: %s(): Received flow off in " 639 pr_debug("CAIF: %s(): Received flow off in "
612 "control layer", __func__); 640 "control layer", __func__);
613 } 641 }
614 spin_unlock(&this->info_list_lock); 642 spin_unlock(&this->info_list_lock);
@@ -633,6 +661,7 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
633 if (!ctrl->loop_linkused[linkid]) 661 if (!ctrl->loop_linkused[linkid])
634 goto found; 662 goto found;
635 spin_unlock(&ctrl->loop_linkid_lock); 663 spin_unlock(&ctrl->loop_linkid_lock);
664 pr_err("CAIF: %s(): Out of link-ids\n", __func__);
636 return -EINVAL; 665 return -EINVAL;
637found: 666found:
638 if (!ctrl->loop_linkused[linkid]) 667 if (!ctrl->loop_linkused[linkid])
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 6fb9f9e96cf8..7372f27f1d32 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -62,6 +62,7 @@ int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
62{ 62{
63 struct cfmuxl *muxl = container_obj(layr); 63 struct cfmuxl *muxl = container_obj(layr);
64 spin_lock(&muxl->receive_lock); 64 spin_lock(&muxl->receive_lock);
65 cfsrvl_get(up);
65 list_add(&up->node, &muxl->srvl_list); 66 list_add(&up->node, &muxl->srvl_list);
66 spin_unlock(&muxl->receive_lock); 67 spin_unlock(&muxl->receive_lock);
67 return 0; 68 return 0;
@@ -172,8 +173,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
172 struct cfmuxl *muxl = container_obj(layr); 173 struct cfmuxl *muxl = container_obj(layr);
173 spin_lock(&muxl->receive_lock); 174 spin_lock(&muxl->receive_lock);
174 up = get_up(muxl, id); 175 up = get_up(muxl, id);
176 if (up == NULL)
177 return NULL;
175 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 178 memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
176 list_del(&up->node); 179 list_del(&up->node);
180 cfsrvl_put(up);
177 spin_unlock(&muxl->receive_lock); 181 spin_unlock(&muxl->receive_lock);
178 return up; 182 return up;
179} 183}
@@ -203,8 +207,9 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
203 */ 207 */
204 return /* CFGLU_EPROT; */ 0; 208 return /* CFGLU_EPROT; */ 0;
205 } 209 }
206 210 cfsrvl_get(up);
207 ret = up->receive(up, pkt); 211 ret = up->receive(up, pkt);
212 cfsrvl_put(up);
208 return ret; 213 return ret;
209} 214}
210 215
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index d470c51c6431..aff31f34528f 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -158,6 +158,13 @@ void cfsrvl_init(struct cfsrvl *service,
158 service->layer.ctrlcmd = cfservl_ctrlcmd; 158 service->layer.ctrlcmd = cfservl_ctrlcmd;
159 service->layer.modemcmd = cfservl_modemcmd; 159 service->layer.modemcmd = cfservl_modemcmd;
160 service->dev_info = *dev_info; 160 service->dev_info = *dev_info;
161 kref_init(&service->ref);
162}
163
164void cfsrvl_release(struct kref *kref)
165{
166 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
167 kfree(service);
161} 168}
162 169
163bool cfsrvl_ready(struct cfsrvl *service, int *err) 170bool cfsrvl_ready(struct cfsrvl *service, int *err)
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index f622ff1d39ba..610966abe2dc 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -22,10 +22,10 @@
22#include <net/caif/cfpkt.h> 22#include <net/caif/cfpkt.h>
23#include <net/caif/caif_dev.h> 23#include <net/caif/caif_dev.h>
24 24
25#define CAIF_CONNECT_TIMEOUT 30 25/* GPRS PDP connection has MTU to 1500 */
26#define SIZE_MTU 1500 26#define SIZE_MTU 1500
27#define SIZE_MTU_MAX 4080 27/* 5 sec. connect timeout */
28#define SIZE_MTU_MIN 68 28#define CONNECT_TIMEOUT (5 * HZ)
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500 29#define CAIF_NET_DEFAULT_QUEUE_LEN 500
30 30
31#undef pr_debug 31#undef pr_debug
@@ -37,6 +37,13 @@ static LIST_HEAD(chnl_net_list);
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38MODULE_ALIAS_RTNL_LINK("caif"); 38MODULE_ALIAS_RTNL_LINK("caif");
39 39
40enum caif_states {
41 CAIF_CONNECTED = 1,
42 CAIF_CONNECTING,
43 CAIF_DISCONNECTED,
44 CAIF_SHUTDOWN
45};
46
40struct chnl_net { 47struct chnl_net {
41 struct cflayer chnl; 48 struct cflayer chnl;
42 struct net_device_stats stats; 49 struct net_device_stats stats;
@@ -47,7 +54,7 @@ struct chnl_net {
47 wait_queue_head_t netmgmt_wq; 54 wait_queue_head_t netmgmt_wq;
48 /* Flow status to remember and control the transmission. */ 55 /* Flow status to remember and control the transmission. */
49 bool flowenabled; 56 bool flowenabled;
50 bool pending_close; 57 enum caif_states state;
51}; 58};
52 59
53static void robust_list_del(struct list_head *delete_node) 60static void robust_list_del(struct list_head *delete_node)
@@ -58,15 +65,16 @@ static void robust_list_del(struct list_head *delete_node)
58 list_for_each_safe(list_node, n, &chnl_net_list) { 65 list_for_each_safe(list_node, n, &chnl_net_list) {
59 if (list_node == delete_node) { 66 if (list_node == delete_node) {
60 list_del(list_node); 67 list_del(list_node);
61 break; 68 return;
62 } 69 }
63 } 70 }
71 WARN_ON(1);
64} 72}
65 73
66static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) 74static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
67{ 75{
68 struct sk_buff *skb; 76 struct sk_buff *skb;
69 struct chnl_net *priv = NULL; 77 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
70 int pktlen; 78 int pktlen;
71 int err = 0; 79 int err = 0;
72 80
@@ -91,7 +99,6 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
91 else 99 else
92 skb->ip_summed = CHECKSUM_NONE; 100 skb->ip_summed = CHECKSUM_NONE;
93 101
94 /* FIXME: Drivers should call this in tasklet context. */
95 if (in_interrupt()) 102 if (in_interrupt())
96 netif_rx(skb); 103 netif_rx(skb);
97 else 104 else
@@ -117,23 +124,25 @@ static void close_work(struct work_struct *work)
117 struct chnl_net *dev = NULL; 124 struct chnl_net *dev = NULL;
118 struct list_head *list_node; 125 struct list_head *list_node;
119 struct list_head *_tmp; 126 struct list_head *_tmp;
120 rtnl_lock(); 127 /* May be called with or without RTNL lock held */
128 int islocked = rtnl_is_locked();
129 if (!islocked)
130 rtnl_lock();
121 list_for_each_safe(list_node, _tmp, &chnl_net_list) { 131 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
122 dev = list_entry(list_node, struct chnl_net, list_field); 132 dev = list_entry(list_node, struct chnl_net, list_field);
123 if (!dev->pending_close) 133 if (dev->state == CAIF_SHUTDOWN)
124 continue; 134 dev_close(dev->netdev);
125 list_del(list_node);
126 delete_device(dev);
127 } 135 }
128 rtnl_unlock(); 136 if (!islocked)
137 rtnl_unlock();
129} 138}
130static DECLARE_WORK(close_worker, close_work); 139static DECLARE_WORK(close_worker, close_work);
131 140
132static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, 141static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
133 int phyid) 142 int phyid)
134{ 143{
135 struct chnl_net *priv; 144 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
136 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n", 145 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
137 __func__, 146 __func__,
138 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : 147 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
139 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : 148 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
@@ -143,21 +152,31 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
143 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? 152 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
144 "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); 153 "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
145 154
146 priv = container_of(layr, struct chnl_net, chnl); 155
147 156
148 switch (flow) { 157 switch (flow) {
149 case CAIF_CTRLCMD_FLOW_OFF_IND: 158 case CAIF_CTRLCMD_FLOW_OFF_IND:
159 priv->flowenabled = false;
160 netif_stop_queue(priv->netdev);
161 break;
150 case CAIF_CTRLCMD_DEINIT_RSP: 162 case CAIF_CTRLCMD_DEINIT_RSP:
163 priv->state = CAIF_DISCONNECTED;
164 break;
151 case CAIF_CTRLCMD_INIT_FAIL_RSP: 165 case CAIF_CTRLCMD_INIT_FAIL_RSP:
166 priv->state = CAIF_DISCONNECTED;
167 wake_up_interruptible(&priv->netmgmt_wq);
168 break;
152 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: 169 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
153 priv->flowenabled = false; 170 priv->state = CAIF_SHUTDOWN;
154 netif_tx_disable(priv->netdev); 171 netif_tx_disable(priv->netdev);
155 pr_warning("CAIF: %s(): done\n", __func__);
156 priv->pending_close = 1;
157 schedule_work(&close_worker); 172 schedule_work(&close_worker);
158 break; 173 break;
159 case CAIF_CTRLCMD_FLOW_ON_IND: 174 case CAIF_CTRLCMD_FLOW_ON_IND:
175 priv->flowenabled = true;
176 netif_wake_queue(priv->netdev);
177 break;
160 case CAIF_CTRLCMD_INIT_RSP: 178 case CAIF_CTRLCMD_INIT_RSP:
179 priv->state = CAIF_CONNECTED;
161 priv->flowenabled = true; 180 priv->flowenabled = true;
162 netif_wake_queue(priv->netdev); 181 netif_wake_queue(priv->netdev);
163 wake_up_interruptible(&priv->netmgmt_wq); 182 wake_up_interruptible(&priv->netmgmt_wq);
@@ -194,9 +213,6 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
194 213
195 pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb); 214 pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
196 215
197 pr_debug("CAIF: %s(): transmit inst %s %d,%p\n",
198 __func__, dev->name, priv->chnl.dn->id, &priv->chnl.dn);
199
200 /* Send the packet down the stack. */ 216 /* Send the packet down the stack. */
201 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 217 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
202 if (result) { 218 if (result) {
@@ -217,61 +233,59 @@ static int chnl_net_open(struct net_device *dev)
217 struct chnl_net *priv = NULL; 233 struct chnl_net *priv = NULL;
218 int result = -1; 234 int result = -1;
219 ASSERT_RTNL(); 235 ASSERT_RTNL();
220
221 priv = netdev_priv(dev); 236 priv = netdev_priv(dev);
222 pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name);
223
224 if (!priv) { 237 if (!priv) {
225 pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__); 238 pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
226 return -ENODEV; 239 return -ENODEV;
227 } 240 }
228 result = caif_connect_client(&priv->conn_req, &priv->chnl); 241
229 if (result != 0) { 242 if (priv->state != CAIF_CONNECTING) {
230 pr_debug("CAIF: %s(): err: " 243 priv->state = CAIF_CONNECTING;
231 "Unable to register and open device, Err:%d\n", 244 result = caif_connect_client(&priv->conn_req, &priv->chnl);
232 __func__, 245 if (result != 0) {
233 result); 246 priv->state = CAIF_DISCONNECTED;
234 return -ENODEV; 247 pr_debug("CAIF: %s(): err: "
248 "Unable to register and open device,"
249 " Err:%d\n",
250 __func__,
251 result);
252 return result;
253 }
235 } 254 }
236 result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled); 255
256 result = wait_event_interruptible_timeout(priv->netmgmt_wq,
257 priv->state != CAIF_CONNECTING,
258 CONNECT_TIMEOUT);
237 259
238 if (result == -ERESTARTSYS) { 260 if (result == -ERESTARTSYS) {
239 pr_debug("CAIF: %s(): wait_event_interruptible" 261 pr_debug("CAIF: %s(): wait_event_interruptible"
240 " woken by a signal\n", __func__); 262 " woken by a signal\n", __func__);
241 return -ERESTARTSYS; 263 return -ERESTARTSYS;
242 } else 264 }
243 pr_debug("CAIF: %s(): Flow on recieved\n", __func__); 265 if (result == 0) {
266 pr_debug("CAIF: %s(): connect timeout\n", __func__);
267 caif_disconnect_client(&priv->chnl);
268 priv->state = CAIF_DISCONNECTED;
269 pr_debug("CAIF: %s(): state disconnected\n", __func__);
270 return -ETIMEDOUT;
271 }
244 272
273 if (priv->state != CAIF_CONNECTED) {
274 pr_debug("CAIF: %s(): connect failed\n", __func__);
275 return -ECONNREFUSED;
276 }
277 pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
245 return 0; 278 return 0;
246} 279}
247 280
248static int chnl_net_stop(struct net_device *dev) 281static int chnl_net_stop(struct net_device *dev)
249{ 282{
250 struct chnl_net *priv; 283 struct chnl_net *priv;
251 int result = -1; 284
252 ASSERT_RTNL(); 285 ASSERT_RTNL();
253 priv = netdev_priv(dev); 286 priv = netdev_priv(dev);
254 287 priv->state = CAIF_DISCONNECTED;
255 result = caif_disconnect_client(&priv->chnl); 288 caif_disconnect_client(&priv->chnl);
256 if (result != 0) {
257 pr_debug("CAIF: %s(): chnl_net_stop: err: "
258 "Unable to STOP device, Err:%d\n",
259 __func__, result);
260 return -EBUSY;
261 }
262 result = wait_event_interruptible(priv->netmgmt_wq,
263 !priv->flowenabled);
264
265 if (result == -ERESTARTSYS) {
266 pr_debug("CAIF: %s(): wait_event_interruptible woken by"
267 " signal, signal_pending(current) = %d\n",
268 __func__,
269 signal_pending(current));
270 } else {
271 pr_debug("CAIF: %s(): disconnect received\n", __func__);
272
273 }
274
275 return 0; 289 return 0;
276} 290}
277 291
@@ -377,6 +391,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
377 ASSERT_RTNL(); 391 ASSERT_RTNL();
378 caifdev = netdev_priv(dev); 392 caifdev = netdev_priv(dev);
379 caif_netlink_parms(data, &caifdev->conn_req); 393 caif_netlink_parms(data, &caifdev->conn_req);
394 dev_net_set(caifdev->netdev, src_net);
395
380 ret = register_netdevice(dev); 396 ret = register_netdevice(dev);
381 if (ret) 397 if (ret)
382 pr_warning("CAIF: %s(): device rtml registration failed\n", 398 pr_warning("CAIF: %s(): device rtml registration failed\n",
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 2dccd4ee591b..e0097531417a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
86 int error; 86 int error;
87 DEFINE_WAIT_FUNC(wait, receiver_wake_function); 87 DEFINE_WAIT_FUNC(wait, receiver_wake_function);
88 88
89 prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 89 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
90 90
91 /* Socket errors? */ 91 /* Socket errors? */
92 error = sock_error(sk); 92 error = sock_error(sk);
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
115 error = 0; 115 error = 0;
116 *timeo_p = schedule_timeout(*timeo_p); 116 *timeo_p = schedule_timeout(*timeo_p);
117out: 117out:
118 finish_wait(sk->sk_sleep, &wait); 118 finish_wait(sk_sleep(sk), &wait);
119 return error; 119 return error;
120interrupted: 120interrupted:
121 error = sock_intr_errno(*timeo_p); 121 error = sock_intr_errno(*timeo_p);
@@ -229,9 +229,18 @@ EXPORT_SYMBOL(skb_free_datagram);
229 229
230void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) 230void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
231{ 231{
232 lock_sock(sk); 232 if (likely(atomic_read(&skb->users) == 1))
233 skb_free_datagram(sk, skb); 233 smp_rmb();
234 release_sock(sk); 234 else if (likely(!atomic_dec_and_test(&skb->users)))
235 return;
236
237 lock_sock_bh(sk);
238 skb_orphan(skb);
239 sk_mem_reclaim_partial(sk);
240 unlock_sock_bh(sk);
241
242 /* skb is now orphaned, can be freed outside of locked section */
243 __kfree_skb(skb);
235} 244}
236EXPORT_SYMBOL(skb_free_datagram_locked); 245EXPORT_SYMBOL(skb_free_datagram_locked);
237 246
@@ -726,7 +735,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
726 struct sock *sk = sock->sk; 735 struct sock *sk = sock->sk;
727 unsigned int mask; 736 unsigned int mask;
728 737
729 sock_poll_wait(file, sk->sk_sleep, wait); 738 sock_poll_wait(file, sk_sleep(sk), wait);
730 mask = 0; 739 mask = 0;
731 740
732 /* exceptional events? */ 741 /* exceptional events? */
diff --git a/net/core/dev.c b/net/core/dev.c
index b31d5d69a467..32611c8f1219 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1557,8 +1557,9 @@ static inline void __netif_reschedule(struct Qdisc *q)
1557 1557
1558 local_irq_save(flags); 1558 local_irq_save(flags);
1559 sd = &__get_cpu_var(softnet_data); 1559 sd = &__get_cpu_var(softnet_data);
1560 q->next_sched = sd->output_queue; 1560 q->next_sched = NULL;
1561 sd->output_queue = q; 1561 *sd->output_queue_tailp = q;
1562 sd->output_queue_tailp = &q->next_sched;
1562 raise_softirq_irqoff(NET_TX_SOFTIRQ); 1563 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1563 local_irq_restore(flags); 1564 local_irq_restore(flags);
1564} 1565}
@@ -1902,13 +1903,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1902 if (!list_empty(&ptype_all)) 1903 if (!list_empty(&ptype_all))
1903 dev_queue_xmit_nit(skb, dev); 1904 dev_queue_xmit_nit(skb, dev);
1904 1905
1905 if (netif_needs_gso(dev, skb)) {
1906 if (unlikely(dev_gso_segment(skb)))
1907 goto out_kfree_skb;
1908 if (skb->next)
1909 goto gso;
1910 }
1911
1912 /* 1906 /*
1913 * If device doesnt need skb->dst, release it right now while 1907 * If device doesnt need skb->dst, release it right now while
1914 * its hot in this cpu cache 1908 * its hot in this cpu cache
@@ -1917,6 +1911,14 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1917 skb_dst_drop(skb); 1911 skb_dst_drop(skb);
1918 1912
1919 skb_orphan_try(skb); 1913 skb_orphan_try(skb);
1914
1915 if (netif_needs_gso(dev, skb)) {
1916 if (unlikely(dev_gso_segment(skb)))
1917 goto out_kfree_skb;
1918 if (skb->next)
1919 goto gso;
1920 }
1921
1920 rc = ops->ndo_start_xmit(skb, dev); 1922 rc = ops->ndo_start_xmit(skb, dev);
1921 if (rc == NETDEV_TX_OK) 1923 if (rc == NETDEV_TX_OK)
1922 txq_trans_update(txq); 1924 txq_trans_update(txq);
@@ -1937,7 +1939,6 @@ gso:
1937 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 1939 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1938 skb_dst_drop(nskb); 1940 skb_dst_drop(nskb);
1939 1941
1940 skb_orphan_try(nskb);
1941 rc = ops->ndo_start_xmit(nskb, dev); 1942 rc = ops->ndo_start_xmit(nskb, dev);
1942 if (unlikely(rc != NETDEV_TX_OK)) { 1943 if (unlikely(rc != NETDEV_TX_OK)) {
1943 if (rc & ~NETDEV_TX_MASK) 1944 if (rc & ~NETDEV_TX_MASK)
@@ -2015,8 +2016,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2015 if (dev->real_num_tx_queues > 1) 2016 if (dev->real_num_tx_queues > 1)
2016 queue_index = skb_tx_hash(dev, skb); 2017 queue_index = skb_tx_hash(dev, skb);
2017 2018
2018 if (sk && rcu_dereference_check(sk->sk_dst_cache, 1)) 2019 if (sk) {
2019 sk_tx_queue_set(sk, queue_index); 2020 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
2021
2022 if (dst && skb_dst(skb) == dst)
2023 sk_tx_queue_set(sk, queue_index);
2024 }
2020 } 2025 }
2021 } 2026 }
2022 2027
@@ -2200,7 +2205,13 @@ int netdev_max_backlog __read_mostly = 1000;
2200int netdev_budget __read_mostly = 300; 2205int netdev_budget __read_mostly = 300;
2201int weight_p __read_mostly = 64; /* old backlog weight */ 2206int weight_p __read_mostly = 64; /* old backlog weight */
2202 2207
2203DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; 2208/* Called with irq disabled */
2209static inline void ____napi_schedule(struct softnet_data *sd,
2210 struct napi_struct *napi)
2211{
2212 list_add_tail(&napi->poll_list, &sd->poll_list);
2213 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2214}
2204 2215
2205#ifdef CONFIG_RPS 2216#ifdef CONFIG_RPS
2206 2217
@@ -2225,7 +2236,11 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2225 int cpu = -1; 2236 int cpu = -1;
2226 u8 ip_proto; 2237 u8 ip_proto;
2227 u16 tcpu; 2238 u16 tcpu;
2228 u32 addr1, addr2, ports, ihl; 2239 u32 addr1, addr2, ihl;
2240 union {
2241 u32 v32;
2242 u16 v16[2];
2243 } ports;
2229 2244
2230 if (skb_rx_queue_recorded(skb)) { 2245 if (skb_rx_queue_recorded(skb)) {
2231 u16 index = skb_get_rx_queue(skb); 2246 u16 index = skb_get_rx_queue(skb);
@@ -2271,7 +2286,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2271 default: 2286 default:
2272 goto done; 2287 goto done;
2273 } 2288 }
2274 ports = 0;
2275 switch (ip_proto) { 2289 switch (ip_proto) {
2276 case IPPROTO_TCP: 2290 case IPPROTO_TCP:
2277 case IPPROTO_UDP: 2291 case IPPROTO_UDP:
@@ -2281,25 +2295,20 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2281 case IPPROTO_SCTP: 2295 case IPPROTO_SCTP:
2282 case IPPROTO_UDPLITE: 2296 case IPPROTO_UDPLITE:
2283 if (pskb_may_pull(skb, (ihl * 4) + 4)) { 2297 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2284 __be16 *hports = (__be16 *) (skb->data + (ihl * 4)); 2298 ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
2285 u32 sport, dport; 2299 if (ports.v16[1] < ports.v16[0])
2286 2300 swap(ports.v16[0], ports.v16[1]);
2287 sport = (__force u16) hports[0]; 2301 break;
2288 dport = (__force u16) hports[1];
2289 if (dport < sport)
2290 swap(sport, dport);
2291 ports = (sport << 16) + dport;
2292 } 2302 }
2293 break;
2294
2295 default: 2303 default:
2304 ports.v32 = 0;
2296 break; 2305 break;
2297 } 2306 }
2298 2307
2299 /* get a consistent hash (same value on both flow directions) */ 2308 /* get a consistent hash (same value on both flow directions) */
2300 if (addr2 < addr1) 2309 if (addr2 < addr1)
2301 swap(addr1, addr2); 2310 swap(addr1, addr2);
2302 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd); 2311 skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2303 if (!skb->rxhash) 2312 if (!skb->rxhash)
2304 skb->rxhash = 1; 2313 skb->rxhash = 1;
2305 2314
@@ -2362,8 +2371,8 @@ static void rps_trigger_softirq(void *data)
2362{ 2371{
2363 struct softnet_data *sd = data; 2372 struct softnet_data *sd = data;
2364 2373
2365 __napi_schedule(&sd->backlog); 2374 ____napi_schedule(sd, &sd->backlog);
2366 __get_cpu_var(netdev_rx_stat).received_rps++; 2375 sd->received_rps++;
2367} 2376}
2368 2377
2369#endif /* CONFIG_RPS */ 2378#endif /* CONFIG_RPS */
@@ -2402,15 +2411,15 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2402 sd = &per_cpu(softnet_data, cpu); 2411 sd = &per_cpu(softnet_data, cpu);
2403 2412
2404 local_irq_save(flags); 2413 local_irq_save(flags);
2405 __get_cpu_var(netdev_rx_stat).total++;
2406 2414
2407 rps_lock(sd); 2415 rps_lock(sd);
2408 if (sd->input_pkt_queue.qlen <= netdev_max_backlog) { 2416 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2409 if (sd->input_pkt_queue.qlen) { 2417 if (skb_queue_len(&sd->input_pkt_queue)) {
2410enqueue: 2418enqueue:
2411 __skb_queue_tail(&sd->input_pkt_queue, skb); 2419 __skb_queue_tail(&sd->input_pkt_queue, skb);
2412#ifdef CONFIG_RPS 2420#ifdef CONFIG_RPS
2413 *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen; 2421 *qtail = sd->input_queue_head +
2422 skb_queue_len(&sd->input_pkt_queue);
2414#endif 2423#endif
2415 rps_unlock(sd); 2424 rps_unlock(sd);
2416 local_irq_restore(flags); 2425 local_irq_restore(flags);
@@ -2420,14 +2429,14 @@ enqueue:
2420 /* Schedule NAPI for backlog device */ 2429 /* Schedule NAPI for backlog device */
2421 if (napi_schedule_prep(&sd->backlog)) { 2430 if (napi_schedule_prep(&sd->backlog)) {
2422 if (!rps_ipi_queued(sd)) 2431 if (!rps_ipi_queued(sd))
2423 __napi_schedule(&sd->backlog); 2432 ____napi_schedule(sd, &sd->backlog);
2424 } 2433 }
2425 goto enqueue; 2434 goto enqueue;
2426 } 2435 }
2427 2436
2437 sd->dropped++;
2428 rps_unlock(sd); 2438 rps_unlock(sd);
2429 2439
2430 __get_cpu_var(netdev_rx_stat).dropped++;
2431 local_irq_restore(flags); 2440 local_irq_restore(flags);
2432 2441
2433 kfree_skb(skb); 2442 kfree_skb(skb);
@@ -2527,6 +2536,7 @@ static void net_tx_action(struct softirq_action *h)
2527 local_irq_disable(); 2536 local_irq_disable();
2528 head = sd->output_queue; 2537 head = sd->output_queue;
2529 sd->output_queue = NULL; 2538 sd->output_queue = NULL;
2539 sd->output_queue_tailp = &sd->output_queue;
2530 local_irq_enable(); 2540 local_irq_enable();
2531 2541
2532 while (head) { 2542 while (head) {
@@ -2801,7 +2811,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
2801 skb->dev = master; 2811 skb->dev = master;
2802 } 2812 }
2803 2813
2804 __get_cpu_var(netdev_rx_stat).total++; 2814 __get_cpu_var(softnet_data).processed++;
2805 2815
2806 skb_reset_network_header(skb); 2816 skb_reset_network_header(skb);
2807 skb_reset_transport_header(skb); 2817 skb_reset_transport_header(skb);
@@ -2930,13 +2940,21 @@ static void flush_backlog(void *arg)
2930 struct sk_buff *skb, *tmp; 2940 struct sk_buff *skb, *tmp;
2931 2941
2932 rps_lock(sd); 2942 rps_lock(sd);
2933 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) 2943 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
2934 if (skb->dev == dev) { 2944 if (skb->dev == dev) {
2935 __skb_unlink(skb, &sd->input_pkt_queue); 2945 __skb_unlink(skb, &sd->input_pkt_queue);
2936 kfree_skb(skb); 2946 kfree_skb(skb);
2937 input_queue_head_incr(sd); 2947 input_queue_head_add(sd, 1);
2938 } 2948 }
2949 }
2939 rps_unlock(sd); 2950 rps_unlock(sd);
2951
2952 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
2953 if (skb->dev == dev) {
2954 __skb_unlink(skb, &sd->process_queue);
2955 kfree_skb(skb);
2956 }
2957 }
2940} 2958}
2941 2959
2942static int napi_gro_complete(struct sk_buff *skb) 2960static int napi_gro_complete(struct sk_buff *skb)
@@ -3239,30 +3257,85 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
3239} 3257}
3240EXPORT_SYMBOL(napi_gro_frags); 3258EXPORT_SYMBOL(napi_gro_frags);
3241 3259
3260/*
3261 * net_rps_action sends any pending IPI's for rps.
3262 * Note: called with local irq disabled, but exits with local irq enabled.
3263 */
3264static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3265{
3266#ifdef CONFIG_RPS
3267 struct softnet_data *remsd = sd->rps_ipi_list;
3268
3269 if (remsd) {
3270 sd->rps_ipi_list = NULL;
3271
3272 local_irq_enable();
3273
3274 /* Send pending IPI's to kick RPS processing on remote cpus. */
3275 while (remsd) {
3276 struct softnet_data *next = remsd->rps_ipi_next;
3277
3278 if (cpu_online(remsd->cpu))
3279 __smp_call_function_single(remsd->cpu,
3280 &remsd->csd, 0);
3281 remsd = next;
3282 }
3283 } else
3284#endif
3285 local_irq_enable();
3286}
3287
3242static int process_backlog(struct napi_struct *napi, int quota) 3288static int process_backlog(struct napi_struct *napi, int quota)
3243{ 3289{
3244 int work = 0; 3290 int work = 0;
3245 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3291 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3246 3292
3293#ifdef CONFIG_RPS
3294 /* Check if we have pending ipi, its better to send them now,
3295 * not waiting net_rx_action() end.
3296 */
3297 if (sd->rps_ipi_list) {
3298 local_irq_disable();
3299 net_rps_action_and_irq_enable(sd);
3300 }
3301#endif
3247 napi->weight = weight_p; 3302 napi->weight = weight_p;
3248 do { 3303 local_irq_disable();
3304 while (work < quota) {
3249 struct sk_buff *skb; 3305 struct sk_buff *skb;
3306 unsigned int qlen;
3250 3307
3251 local_irq_disable(); 3308 while ((skb = __skb_dequeue(&sd->process_queue))) {
3252 rps_lock(sd);
3253 skb = __skb_dequeue(&sd->input_pkt_queue);
3254 if (!skb) {
3255 __napi_complete(napi);
3256 rps_unlock(sd);
3257 local_irq_enable(); 3309 local_irq_enable();
3258 break; 3310 __netif_receive_skb(skb);
3311 if (++work >= quota)
3312 return work;
3313 local_irq_disable();
3259 } 3314 }
3260 input_queue_head_incr(sd);
3261 rps_unlock(sd);
3262 local_irq_enable();
3263 3315
3264 __netif_receive_skb(skb); 3316 rps_lock(sd);
3265 } while (++work < quota); 3317 qlen = skb_queue_len(&sd->input_pkt_queue);
3318 if (qlen) {
3319 input_queue_head_add(sd, qlen);
3320 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3321 &sd->process_queue);
3322 }
3323 if (qlen < quota - work) {
3324 /*
3325 * Inline a custom version of __napi_complete().
3326 * only current cpu owns and manipulates this napi,
3327 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3328 * we can use a plain write instead of clear_bit(),
3329 * and we dont need an smp_mb() memory barrier.
3330 */
3331 list_del(&napi->poll_list);
3332 napi->state = 0;
3333
3334 quota = work + qlen;
3335 }
3336 rps_unlock(sd);
3337 }
3338 local_irq_enable();
3266 3339
3267 return work; 3340 return work;
3268} 3341}
@@ -3278,8 +3351,7 @@ void __napi_schedule(struct napi_struct *n)
3278 unsigned long flags; 3351 unsigned long flags;
3279 3352
3280 local_irq_save(flags); 3353 local_irq_save(flags);
3281 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); 3354 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3282 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3283 local_irq_restore(flags); 3355 local_irq_restore(flags);
3284} 3356}
3285EXPORT_SYMBOL(__napi_schedule); 3357EXPORT_SYMBOL(__napi_schedule);
@@ -3350,45 +3422,16 @@ void netif_napi_del(struct napi_struct *napi)
3350} 3422}
3351EXPORT_SYMBOL(netif_napi_del); 3423EXPORT_SYMBOL(netif_napi_del);
3352 3424
3353/*
3354 * net_rps_action sends any pending IPI's for rps.
3355 * Note: called with local irq disabled, but exits with local irq enabled.
3356 */
3357static void net_rps_action_and_irq_disable(void)
3358{
3359#ifdef CONFIG_RPS
3360 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3361 struct softnet_data *remsd = sd->rps_ipi_list;
3362
3363 if (remsd) {
3364 sd->rps_ipi_list = NULL;
3365
3366 local_irq_enable();
3367
3368 /* Send pending IPI's to kick RPS processing on remote cpus. */
3369 while (remsd) {
3370 struct softnet_data *next = remsd->rps_ipi_next;
3371
3372 if (cpu_online(remsd->cpu))
3373 __smp_call_function_single(remsd->cpu,
3374 &remsd->csd, 0);
3375 remsd = next;
3376 }
3377 } else
3378#endif
3379 local_irq_enable();
3380}
3381
3382static void net_rx_action(struct softirq_action *h) 3425static void net_rx_action(struct softirq_action *h)
3383{ 3426{
3384 struct list_head *list = &__get_cpu_var(softnet_data).poll_list; 3427 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3385 unsigned long time_limit = jiffies + 2; 3428 unsigned long time_limit = jiffies + 2;
3386 int budget = netdev_budget; 3429 int budget = netdev_budget;
3387 void *have; 3430 void *have;
3388 3431
3389 local_irq_disable(); 3432 local_irq_disable();
3390 3433
3391 while (!list_empty(list)) { 3434 while (!list_empty(&sd->poll_list)) {
3392 struct napi_struct *n; 3435 struct napi_struct *n;
3393 int work, weight; 3436 int work, weight;
3394 3437
@@ -3406,7 +3449,7 @@ static void net_rx_action(struct softirq_action *h)
3406 * entries to the tail of this list, and only ->poll() 3449 * entries to the tail of this list, and only ->poll()
3407 * calls can remove this head entry from the list. 3450 * calls can remove this head entry from the list.
3408 */ 3451 */
3409 n = list_first_entry(list, struct napi_struct, poll_list); 3452 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3410 3453
3411 have = netpoll_poll_lock(n); 3454 have = netpoll_poll_lock(n);
3412 3455
@@ -3441,13 +3484,13 @@ static void net_rx_action(struct softirq_action *h)
3441 napi_complete(n); 3484 napi_complete(n);
3442 local_irq_disable(); 3485 local_irq_disable();
3443 } else 3486 } else
3444 list_move_tail(&n->poll_list, list); 3487 list_move_tail(&n->poll_list, &sd->poll_list);
3445 } 3488 }
3446 3489
3447 netpoll_poll_unlock(have); 3490 netpoll_poll_unlock(have);
3448 } 3491 }
3449out: 3492out:
3450 net_rps_action_and_irq_disable(); 3493 net_rps_action_and_irq_enable(sd);
3451 3494
3452#ifdef CONFIG_NET_DMA 3495#ifdef CONFIG_NET_DMA
3453 /* 3496 /*
@@ -3460,7 +3503,7 @@ out:
3460 return; 3503 return;
3461 3504
3462softnet_break: 3505softnet_break:
3463 __get_cpu_var(netdev_rx_stat).time_squeeze++; 3506 sd->time_squeeze++;
3464 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3507 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3465 goto out; 3508 goto out;
3466} 3509}
@@ -3661,17 +3704,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
3661 return 0; 3704 return 0;
3662} 3705}
3663 3706
3664static struct netif_rx_stats *softnet_get_online(loff_t *pos) 3707static struct softnet_data *softnet_get_online(loff_t *pos)
3665{ 3708{
3666 struct netif_rx_stats *rc = NULL; 3709 struct softnet_data *sd = NULL;
3667 3710
3668 while (*pos < nr_cpu_ids) 3711 while (*pos < nr_cpu_ids)
3669 if (cpu_online(*pos)) { 3712 if (cpu_online(*pos)) {
3670 rc = &per_cpu(netdev_rx_stat, *pos); 3713 sd = &per_cpu(softnet_data, *pos);
3671 break; 3714 break;
3672 } else 3715 } else
3673 ++*pos; 3716 ++*pos;
3674 return rc; 3717 return sd;
3675} 3718}
3676 3719
3677static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) 3720static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3691,12 +3734,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
3691 3734
3692static int softnet_seq_show(struct seq_file *seq, void *v) 3735static int softnet_seq_show(struct seq_file *seq, void *v)
3693{ 3736{
3694 struct netif_rx_stats *s = v; 3737 struct softnet_data *sd = v;
3695 3738
3696 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 3739 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3697 s->total, s->dropped, s->time_squeeze, 0, 3740 sd->processed, sd->dropped, sd->time_squeeze, 0,
3698 0, 0, 0, 0, /* was fastroute */ 3741 0, 0, 0, 0, /* was fastroute */
3699 s->cpu_collision, s->received_rps); 3742 sd->cpu_collision, sd->received_rps);
3700 return 0; 3743 return 0;
3701} 3744}
3702 3745
@@ -5584,7 +5627,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
5584 void *ocpu) 5627 void *ocpu)
5585{ 5628{
5586 struct sk_buff **list_skb; 5629 struct sk_buff **list_skb;
5587 struct Qdisc **list_net;
5588 struct sk_buff *skb; 5630 struct sk_buff *skb;
5589 unsigned int cpu, oldcpu = (unsigned long)ocpu; 5631 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5590 struct softnet_data *sd, *oldsd; 5632 struct softnet_data *sd, *oldsd;
@@ -5605,13 +5647,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
5605 *list_skb = oldsd->completion_queue; 5647 *list_skb = oldsd->completion_queue;
5606 oldsd->completion_queue = NULL; 5648 oldsd->completion_queue = NULL;
5607 5649
5608 /* Find end of our output_queue. */
5609 list_net = &sd->output_queue;
5610 while (*list_net)
5611 list_net = &(*list_net)->next_sched;
5612 /* Append output queue from offline CPU. */ 5650 /* Append output queue from offline CPU. */
5613 *list_net = oldsd->output_queue; 5651 if (oldsd->output_queue) {
5614 oldsd->output_queue = NULL; 5652 *sd->output_queue_tailp = oldsd->output_queue;
5653 sd->output_queue_tailp = oldsd->output_queue_tailp;
5654 oldsd->output_queue = NULL;
5655 oldsd->output_queue_tailp = &oldsd->output_queue;
5656 }
5615 5657
5616 raise_softirq_irqoff(NET_TX_SOFTIRQ); 5658 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5617 local_irq_enable(); 5659 local_irq_enable();
@@ -5619,8 +5661,10 @@ static int dev_cpu_callback(struct notifier_block *nfb,
5619 /* Process offline CPU's input_pkt_queue */ 5661 /* Process offline CPU's input_pkt_queue */
5620 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 5662 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
5621 netif_rx(skb); 5663 netif_rx(skb);
5622 input_queue_head_incr(oldsd); 5664 input_queue_head_add(oldsd, 1);
5623 } 5665 }
5666 while ((skb = __skb_dequeue(&oldsd->process_queue)))
5667 netif_rx(skb);
5624 5668
5625 return NOTIFY_OK; 5669 return NOTIFY_OK;
5626} 5670}
@@ -5838,10 +5882,13 @@ static int __init net_dev_init(void)
5838 for_each_possible_cpu(i) { 5882 for_each_possible_cpu(i) {
5839 struct softnet_data *sd = &per_cpu(softnet_data, i); 5883 struct softnet_data *sd = &per_cpu(softnet_data, i);
5840 5884
5885 memset(sd, 0, sizeof(*sd));
5841 skb_queue_head_init(&sd->input_pkt_queue); 5886 skb_queue_head_init(&sd->input_pkt_queue);
5887 skb_queue_head_init(&sd->process_queue);
5842 sd->completion_queue = NULL; 5888 sd->completion_queue = NULL;
5843 INIT_LIST_HEAD(&sd->poll_list); 5889 INIT_LIST_HEAD(&sd->poll_list);
5844 5890 sd->output_queue = NULL;
5891 sd->output_queue_tailp = &sd->output_queue;
5845#ifdef CONFIG_RPS 5892#ifdef CONFIG_RPS
5846 sd->csd.func = rps_trigger_softirq; 5893 sd->csd.func = rps_trigger_softirq;
5847 sd->csd.info = sd; 5894 sd->csd.info = sd;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc66592453c..42e84e08a1be 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -122,7 +122,7 @@ errout:
122} 122}
123 123
124struct fib_rules_ops * 124struct fib_rules_ops *
125fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) 125fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
126{ 126{
127 struct fib_rules_ops *ops; 127 struct fib_rules_ops *ops;
128 int err; 128 int err;
diff --git a/net/core/filter.c b/net/core/filter.c
index ff943bed21af..da69fb728d32 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -302,6 +302,8 @@ load_b:
302 A = skb->pkt_type; 302 A = skb->pkt_type;
303 continue; 303 continue;
304 case SKF_AD_IFINDEX: 304 case SKF_AD_IFINDEX:
305 if (!skb->dev)
306 return 0;
305 A = skb->dev->ifindex; 307 A = skb->dev->ifindex;
306 continue; 308 continue;
307 case SKF_AD_MARK: 309 case SKF_AD_MARK:
@@ -310,6 +312,11 @@ load_b:
310 case SKF_AD_QUEUE: 312 case SKF_AD_QUEUE:
311 A = skb->queue_mapping; 313 A = skb->queue_mapping;
312 continue; 314 continue;
315 case SKF_AD_HATYPE:
316 if (!skb->dev)
317 return 0;
318 A = skb->dev->type;
319 continue;
313 case SKF_AD_NLATTR: { 320 case SKF_AD_NLATTR: {
314 struct nlattr *nla; 321 struct nlattr *nla;
315 322
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index bd8c4712ea24..c988e685433a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,6 +27,51 @@ EXPORT_SYMBOL(init_net);
27 27
28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
29 29
30static void net_generic_release(struct rcu_head *rcu)
31{
32 struct net_generic *ng;
33
34 ng = container_of(rcu, struct net_generic, rcu);
35 kfree(ng);
36}
37
38static int net_assign_generic(struct net *net, int id, void *data)
39{
40 struct net_generic *ng, *old_ng;
41
42 BUG_ON(!mutex_is_locked(&net_mutex));
43 BUG_ON(id == 0);
44
45 ng = old_ng = net->gen;
46 if (old_ng->len >= id)
47 goto assign;
48
49 ng = kzalloc(sizeof(struct net_generic) +
50 id * sizeof(void *), GFP_KERNEL);
51 if (ng == NULL)
52 return -ENOMEM;
53
54 /*
55 * Some synchronisation notes:
56 *
57 * The net_generic explores the net->gen array inside rcu
58 * read section. Besides once set the net->gen->ptr[x]
59 * pointer never changes (see rules in netns/generic.h).
60 *
61 * That said, we simply duplicate this array and schedule
62 * the old copy for kfree after a grace period.
63 */
64
65 ng->len = id;
66 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
67
68 rcu_assign_pointer(net->gen, ng);
69 call_rcu(&old_ng->rcu, net_generic_release);
70assign:
71 ng->ptr[id - 1] = data;
72 return 0;
73}
74
30static int ops_init(const struct pernet_operations *ops, struct net *net) 75static int ops_init(const struct pernet_operations *ops, struct net *net)
31{ 76{
32 int err; 77 int err;
@@ -469,10 +514,10 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys);
469 * addition run the exit method for all existing network 514 * addition run the exit method for all existing network
470 * namespaces. 515 * namespaces.
471 */ 516 */
472void unregister_pernet_subsys(struct pernet_operations *module) 517void unregister_pernet_subsys(struct pernet_operations *ops)
473{ 518{
474 mutex_lock(&net_mutex); 519 mutex_lock(&net_mutex);
475 unregister_pernet_operations(module); 520 unregister_pernet_operations(ops);
476 mutex_unlock(&net_mutex); 521 mutex_unlock(&net_mutex);
477} 522}
478EXPORT_SYMBOL_GPL(unregister_pernet_subsys); 523EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
@@ -526,49 +571,3 @@ void unregister_pernet_device(struct pernet_operations *ops)
526 mutex_unlock(&net_mutex); 571 mutex_unlock(&net_mutex);
527} 572}
528EXPORT_SYMBOL_GPL(unregister_pernet_device); 573EXPORT_SYMBOL_GPL(unregister_pernet_device);
529
530static void net_generic_release(struct rcu_head *rcu)
531{
532 struct net_generic *ng;
533
534 ng = container_of(rcu, struct net_generic, rcu);
535 kfree(ng);
536}
537
538int net_assign_generic(struct net *net, int id, void *data)
539{
540 struct net_generic *ng, *old_ng;
541
542 BUG_ON(!mutex_is_locked(&net_mutex));
543 BUG_ON(id == 0);
544
545 ng = old_ng = net->gen;
546 if (old_ng->len >= id)
547 goto assign;
548
549 ng = kzalloc(sizeof(struct net_generic) +
550 id * sizeof(void *), GFP_KERNEL);
551 if (ng == NULL)
552 return -ENOMEM;
553
554 /*
555 * Some synchronisation notes:
556 *
557 * The net_generic explores the net->gen array inside rcu
558 * read section. Besides once set the net->gen->ptr[x]
559 * pointer never changes (see rules in netns/generic.h).
560 *
561 * That said, we simply duplicate this array and schedule
562 * the old copy for kfree after a grace period.
563 */
564
565 ng->len = id;
566 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
567
568 rcu_assign_pointer(net->gen, ng);
569 call_rcu(&old_ng->rcu, net_generic_release);
570assign:
571 ng->ptr[id - 1] = data;
572 return 0;
573}
574EXPORT_SYMBOL_GPL(net_assign_generic);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b97597..94825b109551 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,9 +179,8 @@ static void service_arp_queue(struct netpoll_info *npi)
179 } 179 }
180} 180}
181 181
182void netpoll_poll(struct netpoll *np) 182void netpoll_poll_dev(struct net_device *dev)
183{ 183{
184 struct net_device *dev = np->dev;
185 const struct net_device_ops *ops; 184 const struct net_device_ops *ops;
186 185
187 if (!dev || !netif_running(dev)) 186 if (!dev || !netif_running(dev))
@@ -201,6 +200,11 @@ void netpoll_poll(struct netpoll *np)
201 zap_completion_queue(); 200 zap_completion_queue();
202} 201}
203 202
203void netpoll_poll(struct netpoll *np)
204{
205 netpoll_poll_dev(np->dev);
206}
207
204static void refill_skbs(void) 208static void refill_skbs(void)
205{ 209{
206 struct sk_buff *skb; 210 struct sk_buff *skb;
@@ -282,7 +286,7 @@ static int netpoll_owner_active(struct net_device *dev)
282 return 0; 286 return 0;
283} 287}
284 288
285static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) 289void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
286{ 290{
287 int status = NETDEV_TX_BUSY; 291 int status = NETDEV_TX_BUSY;
288 unsigned long tries; 292 unsigned long tries;
@@ -308,7 +312,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
308 tries > 0; --tries) { 312 tries > 0; --tries) {
309 if (__netif_tx_trylock(txq)) { 313 if (__netif_tx_trylock(txq)) {
310 if (!netif_tx_queue_stopped(txq)) { 314 if (!netif_tx_queue_stopped(txq)) {
315 dev->priv_flags |= IFF_IN_NETPOLL;
311 status = ops->ndo_start_xmit(skb, dev); 316 status = ops->ndo_start_xmit(skb, dev);
317 dev->priv_flags &= ~IFF_IN_NETPOLL;
312 if (status == NETDEV_TX_OK) 318 if (status == NETDEV_TX_OK)
313 txq_trans_update(txq); 319 txq_trans_update(txq);
314 } 320 }
@@ -756,7 +762,10 @@ int netpoll_setup(struct netpoll *np)
756 atomic_inc(&npinfo->refcnt); 762 atomic_inc(&npinfo->refcnt);
757 } 763 }
758 764
759 if (!ndev->netdev_ops->ndo_poll_controller) { 765 npinfo->netpoll = np;
766
767 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
768 !ndev->netdev_ops->ndo_poll_controller) {
760 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", 769 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
761 np->name, np->dev_name); 770 np->name, np->dev_name);
762 err = -ENOTSUPP; 771 err = -ENOTSUPP;
@@ -878,6 +887,7 @@ void netpoll_cleanup(struct netpoll *np)
878 } 887 }
879 888
880 if (atomic_dec_and_test(&npinfo->refcnt)) { 889 if (atomic_dec_and_test(&npinfo->refcnt)) {
890 const struct net_device_ops *ops;
881 skb_queue_purge(&npinfo->arp_tx); 891 skb_queue_purge(&npinfo->arp_tx);
882 skb_queue_purge(&npinfo->txq); 892 skb_queue_purge(&npinfo->txq);
883 cancel_rearming_delayed_work(&npinfo->tx_work); 893 cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -885,7 +895,11 @@ void netpoll_cleanup(struct netpoll *np)
885 /* clean after last, unfinished work */ 895 /* clean after last, unfinished work */
886 __skb_queue_purge(&npinfo->txq); 896 __skb_queue_purge(&npinfo->txq);
887 kfree(npinfo); 897 kfree(npinfo);
888 np->dev->npinfo = NULL; 898 ops = np->dev->netdev_ops;
899 if (ops->ndo_netpoll_cleanup)
900 ops->ndo_netpoll_cleanup(np->dev);
901 else
902 np->dev->npinfo = NULL;
889 } 903 }
890 } 904 }
891 905
@@ -908,6 +922,7 @@ void netpoll_set_trap(int trap)
908 atomic_dec(&trapped); 922 atomic_dec(&trapped);
909} 923}
910 924
925EXPORT_SYMBOL(netpoll_send_skb);
911EXPORT_SYMBOL(netpoll_set_trap); 926EXPORT_SYMBOL(netpoll_set_trap);
912EXPORT_SYMBOL(netpoll_trap); 927EXPORT_SYMBOL(netpoll_trap);
913EXPORT_SYMBOL(netpoll_print_options); 928EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +930,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
915EXPORT_SYMBOL(netpoll_setup); 930EXPORT_SYMBOL(netpoll_setup);
916EXPORT_SYMBOL(netpoll_cleanup); 931EXPORT_SYMBOL(netpoll_cleanup);
917EXPORT_SYMBOL(netpoll_send_udp); 932EXPORT_SYMBOL(netpoll_send_udp);
933EXPORT_SYMBOL(netpoll_poll_dev);
918EXPORT_SYMBOL(netpoll_poll); 934EXPORT_SYMBOL(netpoll_poll);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 78c85985cb30..23a71cb21273 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -98,7 +98,7 @@ int lockdep_rtnl_is_held(void)
98EXPORT_SYMBOL(lockdep_rtnl_is_held); 98EXPORT_SYMBOL(lockdep_rtnl_is_held);
99#endif /* #ifdef CONFIG_PROVE_LOCKING */ 99#endif /* #ifdef CONFIG_PROVE_LOCKING */
100 100
101static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 101static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
102 102
103static inline int rtm_msgindex(int msgtype) 103static inline int rtm_msgindex(int msgtype)
104{ 104{
@@ -118,7 +118,7 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
118{ 118{
119 struct rtnl_link *tab; 119 struct rtnl_link *tab;
120 120
121 if (protocol < NPROTO) 121 if (protocol <= RTNL_FAMILY_MAX)
122 tab = rtnl_msg_handlers[protocol]; 122 tab = rtnl_msg_handlers[protocol];
123 else 123 else
124 tab = NULL; 124 tab = NULL;
@@ -133,7 +133,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
133{ 133{
134 struct rtnl_link *tab; 134 struct rtnl_link *tab;
135 135
136 if (protocol < NPROTO) 136 if (protocol <= RTNL_FAMILY_MAX)
137 tab = rtnl_msg_handlers[protocol]; 137 tab = rtnl_msg_handlers[protocol];
138 else 138 else
139 tab = NULL; 139 tab = NULL;
@@ -167,7 +167,7 @@ int __rtnl_register(int protocol, int msgtype,
167 struct rtnl_link *tab; 167 struct rtnl_link *tab;
168 int msgindex; 168 int msgindex;
169 169
170 BUG_ON(protocol < 0 || protocol >= NPROTO); 170 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
171 msgindex = rtm_msgindex(msgtype); 171 msgindex = rtm_msgindex(msgtype);
172 172
173 tab = rtnl_msg_handlers[protocol]; 173 tab = rtnl_msg_handlers[protocol];
@@ -219,7 +219,7 @@ int rtnl_unregister(int protocol, int msgtype)
219{ 219{
220 int msgindex; 220 int msgindex;
221 221
222 BUG_ON(protocol < 0 || protocol >= NPROTO); 222 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
223 msgindex = rtm_msgindex(msgtype); 223 msgindex = rtm_msgindex(msgtype);
224 224
225 if (rtnl_msg_handlers[protocol] == NULL) 225 if (rtnl_msg_handlers[protocol] == NULL)
@@ -241,7 +241,7 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
241 */ 241 */
242void rtnl_unregister_all(int protocol) 242void rtnl_unregister_all(int protocol)
243{ 243{
244 BUG_ON(protocol < 0 || protocol >= NPROTO); 244 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
245 245
246 kfree(rtnl_msg_handlers[protocol]); 246 kfree(rtnl_msg_handlers[protocol]);
247 rtnl_msg_handlers[protocol] = NULL; 247 rtnl_msg_handlers[protocol] = NULL;
@@ -1319,10 +1319,11 @@ replay:
1319 err = ops->newlink(net, dev, tb, data); 1319 err = ops->newlink(net, dev, tb, data);
1320 else 1320 else
1321 err = register_netdevice(dev); 1321 err = register_netdevice(dev);
1322 if (err < 0 && !IS_ERR(dev)) { 1322
1323 if (err < 0 && !IS_ERR(dev))
1323 free_netdev(dev); 1324 free_netdev(dev);
1325 if (err < 0)
1324 goto out; 1326 goto out;
1325 }
1326 1327
1327 err = rtnl_configure_link(dev, ifm); 1328 err = rtnl_configure_link(dev, ifm);
1328 if (err < 0) 1329 if (err < 0)
@@ -1384,7 +1385,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1384 1385
1385 if (s_idx == 0) 1386 if (s_idx == 0)
1386 s_idx = 1; 1387 s_idx = 1;
1387 for (idx = 1; idx < NPROTO; idx++) { 1388 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
1388 int type = cb->nlh->nlmsg_type-RTM_BASE; 1389 int type = cb->nlh->nlmsg_type-RTM_BASE;
1389 if (idx < s_idx || idx == PF_PACKET) 1390 if (idx < s_idx || idx == PF_PACKET)
1390 continue; 1391 continue;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bdea0efdf8cb..a9b0e1f77806 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
117 * 117 *
118 * Out of line support code for skb_put(). Not user callable. 118 * Out of line support code for skb_put(). Not user callable.
119 */ 119 */
120void skb_over_panic(struct sk_buff *skb, int sz, void *here) 120static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121{ 121{
122 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 122 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 "data:%p tail:%#lx end:%#lx dev:%s\n", 123 "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
126 skb->dev ? skb->dev->name : "<NULL>"); 126 skb->dev ? skb->dev->name : "<NULL>");
127 BUG(); 127 BUG();
128} 128}
129EXPORT_SYMBOL(skb_over_panic);
130 129
131/** 130/**
132 * skb_under_panic - private function 131 * skb_under_panic - private function
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic);
137 * Out of line support code for skb_push(). Not user callable. 136 * Out of line support code for skb_push(). Not user callable.
138 */ 137 */
139 138
140void skb_under_panic(struct sk_buff *skb, int sz, void *here) 139static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
141{ 140{
142 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 141 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
143 "data:%p tail:%#lx end:%#lx dev:%s\n", 142 "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
146 skb->dev ? skb->dev->name : "<NULL>"); 145 skb->dev ? skb->dev->name : "<NULL>");
147 BUG(); 146 BUG();
148} 147}
149EXPORT_SYMBOL(skb_under_panic);
150 148
151/* Allocate a new skbuff. We do this ourselves so we can fill in a few 149/* Allocate a new skbuff. We do this ourselves so we can fill in a few
152 * 'private' fields and also do memory statistics to find all the 150 * 'private' fields and also do memory statistics to find all the
@@ -183,12 +181,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
183 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); 181 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
184 if (!skb) 182 if (!skb)
185 goto out; 183 goto out;
184 prefetchw(skb);
186 185
187 size = SKB_DATA_ALIGN(size); 186 size = SKB_DATA_ALIGN(size);
188 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), 187 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
189 gfp_mask, node); 188 gfp_mask, node);
190 if (!data) 189 if (!data)
191 goto nodata; 190 goto nodata;
191 prefetchw(data + size);
192 192
193 /* 193 /*
194 * Only clear those fields we need to clear, not those that we will 194 * Only clear those fields we need to clear, not those that we will
@@ -210,15 +210,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
210 210
211 /* make sure we initialize shinfo sequentially */ 211 /* make sure we initialize shinfo sequentially */
212 shinfo = skb_shinfo(skb); 212 shinfo = skb_shinfo(skb);
213 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
213 atomic_set(&shinfo->dataref, 1); 214 atomic_set(&shinfo->dataref, 1);
214 shinfo->nr_frags = 0;
215 shinfo->gso_size = 0;
216 shinfo->gso_segs = 0;
217 shinfo->gso_type = 0;
218 shinfo->ip6_frag_id = 0;
219 shinfo->tx_flags.flags = 0;
220 skb_frag_list_init(skb);
221 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
222 215
223 if (fclone) { 216 if (fclone) {
224 struct sk_buff *child = skb + 1; 217 struct sk_buff *child = skb + 1;
@@ -507,16 +500,10 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
507 return 0; 500 return 0;
508 501
509 skb_release_head_state(skb); 502 skb_release_head_state(skb);
503
510 shinfo = skb_shinfo(skb); 504 shinfo = skb_shinfo(skb);
505 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
511 atomic_set(&shinfo->dataref, 1); 506 atomic_set(&shinfo->dataref, 1);
512 shinfo->nr_frags = 0;
513 shinfo->gso_size = 0;
514 shinfo->gso_segs = 0;
515 shinfo->gso_type = 0;
516 shinfo->ip6_frag_id = 0;
517 shinfo->tx_flags.flags = 0;
518 skb_frag_list_init(skb);
519 memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
520 507
521 memset(skb, 0, offsetof(struct sk_buff, tail)); 508 memset(skb, 0, offsetof(struct sk_buff, tail));
522 skb->data = skb->head + NET_SKB_PAD; 509 skb->data = skb->head + NET_SKB_PAD;
@@ -1053,7 +1040,7 @@ EXPORT_SYMBOL(skb_push);
1053 */ 1040 */
1054unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 1041unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1055{ 1042{
1056 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 1043 return skb_pull_inline(skb, len);
1057} 1044}
1058EXPORT_SYMBOL(skb_pull); 1045EXPORT_SYMBOL(skb_pull);
1059 1046
diff --git a/net/core/sock.c b/net/core/sock.c
index 7effa1e689df..94c4affdda9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -327,6 +327,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
327 327
328 skb->dev = NULL; 328 skb->dev = NULL;
329 329
330 if (sk_rcvqueues_full(sk, skb)) {
331 atomic_inc(&sk->sk_drops);
332 goto discard_and_relse;
333 }
330 if (nested) 334 if (nested)
331 bh_lock_sock_nested(sk); 335 bh_lock_sock_nested(sk);
332 else 336 else
@@ -1207,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1207 */ 1211 */
1208 sk_refcnt_debug_inc(newsk); 1212 sk_refcnt_debug_inc(newsk);
1209 sk_set_socket(newsk, NULL); 1213 sk_set_socket(newsk, NULL);
1210 newsk->sk_sleep = NULL; 1214 newsk->sk_wq = NULL;
1211 1215
1212 if (newsk->sk_prot->sockets_allocated) 1216 if (newsk->sk_prot->sockets_allocated)
1213 percpu_counter_inc(newsk->sk_prot->sockets_allocated); 1217 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1395,7 +1399,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1395 if (signal_pending(current)) 1399 if (signal_pending(current))
1396 break; 1400 break;
1397 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1401 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1398 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1402 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1399 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) 1403 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1400 break; 1404 break;
1401 if (sk->sk_shutdown & SEND_SHUTDOWN) 1405 if (sk->sk_shutdown & SEND_SHUTDOWN)
@@ -1404,7 +1408,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1404 break; 1408 break;
1405 timeo = schedule_timeout(timeo); 1409 timeo = schedule_timeout(timeo);
1406 } 1410 }
1407 finish_wait(sk->sk_sleep, &wait); 1411 finish_wait(sk_sleep(sk), &wait);
1408 return timeo; 1412 return timeo;
1409} 1413}
1410 1414
@@ -1570,11 +1574,11 @@ int sk_wait_data(struct sock *sk, long *timeo)
1570 int rc; 1574 int rc;
1571 DEFINE_WAIT(wait); 1575 DEFINE_WAIT(wait);
1572 1576
1573 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1577 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1574 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1578 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1575 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1579 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1576 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1580 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1577 finish_wait(sk->sk_sleep, &wait); 1581 finish_wait(sk_sleep(sk), &wait);
1578 return rc; 1582 return rc;
1579} 1583}
1580EXPORT_SYMBOL(sk_wait_data); 1584EXPORT_SYMBOL(sk_wait_data);
@@ -1796,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage);
1796 1800
1797static void sock_def_wakeup(struct sock *sk) 1801static void sock_def_wakeup(struct sock *sk)
1798{ 1802{
1799 read_lock(&sk->sk_callback_lock); 1803 struct socket_wq *wq;
1800 if (sk_has_sleeper(sk)) 1804
1801 wake_up_interruptible_all(sk->sk_sleep); 1805 rcu_read_lock();
1802 read_unlock(&sk->sk_callback_lock); 1806 wq = rcu_dereference(sk->sk_wq);
1807 if (wq_has_sleeper(wq))
1808 wake_up_interruptible_all(&wq->wait);
1809 rcu_read_unlock();
1803} 1810}
1804 1811
1805static void sock_def_error_report(struct sock *sk) 1812static void sock_def_error_report(struct sock *sk)
1806{ 1813{
1807 read_lock(&sk->sk_callback_lock); 1814 struct socket_wq *wq;
1808 if (sk_has_sleeper(sk)) 1815
1809 wake_up_interruptible_poll(sk->sk_sleep, POLLERR); 1816 rcu_read_lock();
1817 wq = rcu_dereference(sk->sk_wq);
1818 if (wq_has_sleeper(wq))
1819 wake_up_interruptible_poll(&wq->wait, POLLERR);
1810 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 1820 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1811 read_unlock(&sk->sk_callback_lock); 1821 rcu_read_unlock();
1812} 1822}
1813 1823
1814static void sock_def_readable(struct sock *sk, int len) 1824static void sock_def_readable(struct sock *sk, int len)
1815{ 1825{
1816 read_lock(&sk->sk_callback_lock); 1826 struct socket_wq *wq;
1817 if (sk_has_sleeper(sk)) 1827
1818 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | 1828 rcu_read_lock();
1829 wq = rcu_dereference(sk->sk_wq);
1830 if (wq_has_sleeper(wq))
1831 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1819 POLLRDNORM | POLLRDBAND); 1832 POLLRDNORM | POLLRDBAND);
1820 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 1833 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1821 read_unlock(&sk->sk_callback_lock); 1834 rcu_read_unlock();
1822} 1835}
1823 1836
1824static void sock_def_write_space(struct sock *sk) 1837static void sock_def_write_space(struct sock *sk)
1825{ 1838{
1826 read_lock(&sk->sk_callback_lock); 1839 struct socket_wq *wq;
1840
1841 rcu_read_lock();
1827 1842
1828 /* Do not wake up a writer until he can make "significant" 1843 /* Do not wake up a writer until he can make "significant"
1829 * progress. --DaveM 1844 * progress. --DaveM
1830 */ 1845 */
1831 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { 1846 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1832 if (sk_has_sleeper(sk)) 1847 wq = rcu_dereference(sk->sk_wq);
1833 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | 1848 if (wq_has_sleeper(wq))
1849 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1834 POLLWRNORM | POLLWRBAND); 1850 POLLWRNORM | POLLWRBAND);
1835 1851
1836 /* Should agree with poll, otherwise some programs break */ 1852 /* Should agree with poll, otherwise some programs break */
@@ -1838,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk)
1838 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 1854 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1839 } 1855 }
1840 1856
1841 read_unlock(&sk->sk_callback_lock); 1857 rcu_read_unlock();
1842} 1858}
1843 1859
1844static void sock_def_destruct(struct sock *sk) 1860static void sock_def_destruct(struct sock *sk)
@@ -1885,7 +1901,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1885 sk->sk_allocation = GFP_KERNEL; 1901 sk->sk_allocation = GFP_KERNEL;
1886 sk->sk_rcvbuf = sysctl_rmem_default; 1902 sk->sk_rcvbuf = sysctl_rmem_default;
1887 sk->sk_sndbuf = sysctl_wmem_default; 1903 sk->sk_sndbuf = sysctl_wmem_default;
1888 sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
1889 sk->sk_state = TCP_CLOSE; 1904 sk->sk_state = TCP_CLOSE;
1890 sk_set_socket(sk, sock); 1905 sk_set_socket(sk, sock);
1891 1906
@@ -1893,10 +1908,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1893 1908
1894 if (sock) { 1909 if (sock) {
1895 sk->sk_type = sock->type; 1910 sk->sk_type = sock->type;
1896 sk->sk_sleep = &sock->wait; 1911 sk->sk_wq = sock->wq;
1897 sock->sk = sk; 1912 sock->sk = sk;
1898 } else 1913 } else
1899 sk->sk_sleep = NULL; 1914 sk->sk_wq = NULL;
1900 1915
1901 spin_lock_init(&sk->sk_dst_lock); 1916 spin_lock_init(&sk->sk_dst_lock);
1902 rwlock_init(&sk->sk_callback_lock); 1917 rwlock_init(&sk->sk_callback_lock);
diff --git a/net/core/stream.c b/net/core/stream.c
index a37debfeb1b2..cc196f42b8d8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
28void sk_stream_write_space(struct sock *sk) 28void sk_stream_write_space(struct sock *sk)
29{ 29{
30 struct socket *sock = sk->sk_socket; 30 struct socket *sock = sk->sk_socket;
31 struct socket_wq *wq;
31 32
32 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { 33 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
33 clear_bit(SOCK_NOSPACE, &sock->flags); 34 clear_bit(SOCK_NOSPACE, &sock->flags);
34 35
35 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 36 rcu_read_lock();
36 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | 37 wq = rcu_dereference(sk->sk_wq);
38 if (wq_has_sleeper(wq))
39 wake_up_interruptible_poll(&wq->wait, POLLOUT |
37 POLLWRNORM | POLLWRBAND); 40 POLLWRNORM | POLLWRBAND);
38 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
39 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); 42 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
43 rcu_read_unlock();
40 } 44 }
41} 45}
42 46
@@ -66,13 +70,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
66 if (signal_pending(tsk)) 70 if (signal_pending(tsk))
67 return sock_intr_errno(*timeo_p); 71 return sock_intr_errno(*timeo_p);
68 72
69 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 73 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
70 sk->sk_write_pending++; 74 sk->sk_write_pending++;
71 done = sk_wait_event(sk, timeo_p, 75 done = sk_wait_event(sk, timeo_p,
72 !sk->sk_err && 76 !sk->sk_err &&
73 !((1 << sk->sk_state) & 77 !((1 << sk->sk_state) &
74 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); 78 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
75 finish_wait(sk->sk_sleep, &wait); 79 finish_wait(sk_sleep(sk), &wait);
76 sk->sk_write_pending--; 80 sk->sk_write_pending--;
77 } while (!done); 81 } while (!done);
78 return 0; 82 return 0;
@@ -96,13 +100,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
96 DEFINE_WAIT(wait); 100 DEFINE_WAIT(wait);
97 101
98 do { 102 do {
99 prepare_to_wait(sk->sk_sleep, &wait, 103 prepare_to_wait(sk_sleep(sk), &wait,
100 TASK_INTERRUPTIBLE); 104 TASK_INTERRUPTIBLE);
101 if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) 105 if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
102 break; 106 break;
103 } while (!signal_pending(current) && timeout); 107 } while (!signal_pending(current) && timeout);
104 108
105 finish_wait(sk->sk_sleep, &wait); 109 finish_wait(sk_sleep(sk), &wait);
106 } 110 }
107} 111}
108 112
@@ -126,7 +130,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
126 while (1) { 130 while (1) {
127 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 131 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
128 132
129 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 133 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
130 134
131 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 135 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
132 goto do_error; 136 goto do_error;
@@ -157,7 +161,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
157 *timeo_p = current_timeo; 161 *timeo_p = current_timeo;
158 } 162 }
159out: 163out:
160 finish_wait(sk->sk_sleep, &wait); 164 finish_wait(sk_sleep(sk), &wait);
161 return err; 165 return err;
162 166
163do_error: 167do_error:
diff --git a/net/dccp/output.c b/net/dccp/output.c
index e98b65e9569f..aadbdb58758b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss);
195 195
196void dccp_write_space(struct sock *sk) 196void dccp_write_space(struct sock *sk)
197{ 197{
198 read_lock(&sk->sk_callback_lock); 198 struct socket_wq *wq;
199 199
200 if (sk_has_sleeper(sk)) 200 rcu_read_lock();
201 wake_up_interruptible(sk->sk_sleep); 201 wq = rcu_dereference(sk->sk_wq);
202 if (wq_has_sleeper(wq))
203 wake_up_interruptible(&wq->wait);
202 /* Should agree with poll, otherwise some programs break */ 204 /* Should agree with poll, otherwise some programs break */
203 if (sock_writeable(sk)) 205 if (sock_writeable(sk))
204 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 206 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
205 207
206 read_unlock(&sk->sk_callback_lock); 208 rcu_read_unlock();
207} 209}
208 210
209/** 211/**
@@ -225,7 +227,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
225 dccp_pr_debug("delayed send by %d msec\n", delay); 227 dccp_pr_debug("delayed send by %d msec\n", delay);
226 jiffdelay = msecs_to_jiffies(delay); 228 jiffdelay = msecs_to_jiffies(delay);
227 229
228 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 230 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
229 231
230 sk->sk_write_pending++; 232 sk->sk_write_pending++;
231 release_sock(sk); 233 release_sock(sk);
@@ -241,7 +243,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
241 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 243 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
242 } while ((delay = rc) > 0); 244 } while ((delay = rc) > 0);
243out: 245out:
244 finish_wait(sk->sk_sleep, &wait); 246 finish_wait(sk_sleep(sk), &wait);
245 return rc; 247 return rc;
246 248
247do_error: 249do_error:
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a0e38d8018f5..b03ecf6b2bb0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
312 unsigned int mask; 312 unsigned int mask;
313 struct sock *sk = sock->sk; 313 struct sock *sk = sock->sk;
314 314
315 sock_poll_wait(file, sk->sk_sleep, wait); 315 sock_poll_wait(file, sk_sleep(sk), wait);
316 if (sk->sk_state == DCCP_LISTEN) 316 if (sk->sk_state == DCCP_LISTEN)
317 return inet_csk_listen_poll(sk); 317 return inet_csk_listen_poll(sk);
318 318
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 55e3b6b0061a..d6b93d19790f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
832 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); 832 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
833 dn_send_conn_conf(sk, allocation); 833 dn_send_conn_conf(sk, allocation);
834 834
835 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 835 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
836 for(;;) { 836 for(;;) {
837 release_sock(sk); 837 release_sock(sk);
838 if (scp->state == DN_CC) 838 if (scp->state == DN_CC)
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
850 err = -EAGAIN; 850 err = -EAGAIN;
851 if (!*timeo) 851 if (!*timeo)
852 break; 852 break;
853 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 853 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
854 } 854 }
855 finish_wait(sk->sk_sleep, &wait); 855 finish_wait(sk_sleep(sk), &wait);
856 if (err == 0) { 856 if (err == 0) {
857 sk->sk_socket->state = SS_CONNECTED; 857 sk->sk_socket->state = SS_CONNECTED;
858 } else if (scp->state != DN_CC) { 858 } else if (scp->state != DN_CC) {
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
873 if (!*timeo) 873 if (!*timeo)
874 return -EALREADY; 874 return -EALREADY;
875 875
876 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 876 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
877 for(;;) { 877 for(;;) {
878 release_sock(sk); 878 release_sock(sk);
879 if (scp->state == DN_CI || scp->state == DN_CC) 879 if (scp->state == DN_CI || scp->state == DN_CC)
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo)
891 err = -ETIMEDOUT; 891 err = -ETIMEDOUT;
892 if (!*timeo) 892 if (!*timeo)
893 break; 893 break;
894 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 894 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
895 } 895 }
896 finish_wait(sk->sk_sleep, &wait); 896 finish_wait(sk_sleep(sk), &wait);
897out: 897out:
898 if (err == 0) { 898 if (err == 0) {
899 sk->sk_socket->state = SS_CONNECTED; 899 sk->sk_socket->state = SS_CONNECTED;
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1040 struct sk_buff *skb = NULL; 1040 struct sk_buff *skb = NULL;
1041 int err = 0; 1041 int err = 0;
1042 1042
1043 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1043 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1044 for(;;) { 1044 for(;;) {
1045 release_sock(sk); 1045 release_sock(sk);
1046 skb = skb_dequeue(&sk->sk_receive_queue); 1046 skb = skb_dequeue(&sk->sk_receive_queue);
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1060 err = -EAGAIN; 1060 err = -EAGAIN;
1061 if (!*timeo) 1061 if (!*timeo)
1062 break; 1062 break;
1063 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1063 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1064 } 1064 }
1065 finish_wait(sk->sk_sleep, &wait); 1065 finish_wait(sk_sleep(sk), &wait);
1066 1066
1067 return skb == NULL ? ERR_PTR(err) : skb; 1067 return skb == NULL ? ERR_PTR(err) : skb;
1068} 1068}
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1746 goto out; 1746 goto out;
1747 } 1747 }
1748 1748
1749 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); 1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 finish_wait(sk->sk_sleep, &wait); 1753 finish_wait(sk_sleep(sk), &wait);
1754 } 1754 }
1755 1755
1756 skb_queue_walk_safe(queue, skb, n) { 1756 skb_queue_walk_safe(queue, skb, n) {
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
2003 goto out; 2003 goto out;
2004 } 2004 }
2005 2005
2006 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2008 sk_wait_event(sk, &timeo, 2008 sk_wait_event(sk, &timeo,
2009 !dn_queue_too_long(scp, queue, flags)); 2009 !dn_queue_too_long(scp, queue, flags));
2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2011 finish_wait(sk->sk_sleep, &wait); 2011 finish_wait(sk_sleep(sk), &wait);
2012 continue; 2012 continue;
2013 } 2013 }
2014 2014
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index af28dcc21844..48fdf10be7a1 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -216,8 +216,8 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
216 dn_rt_cache_flush(-1); 216 dn_rt_cache_flush(-1);
217} 217}
218 218
219static struct fib_rules_ops dn_fib_rules_ops_template = { 219static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = {
220 .family = FIB_RULES_DECNET, 220 .family = AF_DECnet,
221 .rule_size = sizeof(struct dn_fib_rule), 221 .rule_size = sizeof(struct dn_fib_rule),
222 .addr_size = sizeof(u16), 222 .addr_size = sizeof(u16),
223 .action = dn_fib_rule_action, 223 .action = dn_fib_rule_action,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 205a1c12f3c0..61ec0329316c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb)
136 default: 136 default:
137 printk(KERN_DEBUG 137 printk(KERN_DEBUG
138 "%s: unable to resolve type %X addresses.\n", 138 "%s: unable to resolve type %X addresses.\n",
139 dev->name, (int)eth->h_proto); 139 dev->name, ntohs(eth->h_proto));
140 140
141 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); 141 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
142 break; 142 break;
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
162 162
163 skb->dev = dev; 163 skb->dev = dev;
164 skb_reset_mac_header(skb); 164 skb_reset_mac_header(skb);
165 skb_pull(skb, ETH_HLEN); 165 skb_pull_inline(skb, ETH_HLEN);
166 eth = eth_hdr(skb); 166 eth = eth_hdr(skb);
167 167
168 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { 168 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index c7da600750bb..93c91b633a56 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -151,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
151 dev_load(sock_net(sk), ifr.ifr_name); 151 dev_load(sock_net(sk), ifr.ifr_name);
152 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); 152 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
153 153
154 if (!dev)
155 return -ENODEV;
156
154 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) 157 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
155 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); 158 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
156 159
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c5376c725503..c6c43bcd1c6f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -419,7 +419,7 @@ int inet_release(struct socket *sock)
419 if (sk) { 419 if (sk) {
420 long timeout; 420 long timeout;
421 421
422 inet_rps_reset_flow(sk); 422 sock_rps_reset_flow(sk);
423 423
424 /* Applications forget to leave groups before exiting */ 424 /* Applications forget to leave groups before exiting */
425 ip_mc_drop_socket(sk); 425 ip_mc_drop_socket(sk);
@@ -548,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
548{ 548{
549 DEFINE_WAIT(wait); 549 DEFINE_WAIT(wait);
550 550
551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
552 552
553 /* Basic assumption: if someone sets sk->sk_err, he _must_ 553 /* Basic assumption: if someone sets sk->sk_err, he _must_
554 * change state of the socket from TCP_SYN_*. 554 * change state of the socket from TCP_SYN_*.
@@ -561,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
561 lock_sock(sk); 561 lock_sock(sk);
562 if (signal_pending(current) || !timeo) 562 if (signal_pending(current) || !timeo)
563 break; 563 break;
564 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 564 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
565 } 565 }
566 finish_wait(sk->sk_sleep, &wait); 566 finish_wait(sk_sleep(sk), &wait);
567 return timeo; 567 return timeo;
568} 568}
569 569
@@ -722,7 +722,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
722{ 722{
723 struct sock *sk = sock->sk; 723 struct sock *sk = sock->sk;
724 724
725 inet_rps_record_flow(sk); 725 sock_rps_record_flow(sk);
726 726
727 /* We may need to bind the socket. */ 727 /* We may need to bind the socket. */
728 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 728 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -737,7 +737,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
737{ 737{
738 struct sock *sk = sock->sk; 738 struct sock *sk = sock->sk;
739 739
740 inet_rps_record_flow(sk); 740 sock_rps_record_flow(sk);
741 741
742 /* We may need to bind the socket. */ 742 /* We may need to bind the socket. */
743 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 743 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -755,7 +755,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
755 int addr_len = 0; 755 int addr_len = 0;
756 int err; 756 int err;
757 757
758 inet_rps_record_flow(sk); 758 sock_rps_record_flow(sk);
759 759
760 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, 760 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
761 flags & ~MSG_DONTWAIT, &addr_len); 761 flags & ~MSG_DONTWAIT, &addr_len);
@@ -1323,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1323 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1323 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1324 goto out_unlock; 1324 goto out_unlock;
1325 1325
1326 id = ntohl(*(u32 *)&iph->id); 1326 id = ntohl(*(__be32 *)&iph->id);
1327 flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); 1327 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
1328 id >>= 16; 1328 id >>= 16;
1329 1329
1330 for (p = *head; p; p = p->next) { 1330 for (p = *head; p; p = p->next) {
@@ -1337,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1337 1337
1338 if ((iph->protocol ^ iph2->protocol) | 1338 if ((iph->protocol ^ iph2->protocol) |
1339 (iph->tos ^ iph2->tos) | 1339 (iph->tos ^ iph2->tos) |
1340 (iph->saddr ^ iph2->saddr) | 1340 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1341 (iph->daddr ^ iph2->daddr)) { 1341 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1342 NAPI_GRO_CB(p)->same_flow = 0; 1342 NAPI_GRO_CB(p)->same_flow = 0;
1343 continue; 1343 continue;
1344 } 1344 }
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 3ec84fea5b71..76daeb5ff564 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -245,8 +245,8 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
245 rt_cache_flush(ops->fro_net, -1); 245 rt_cache_flush(ops->fro_net, -1);
246} 246}
247 247
248static struct fib_rules_ops fib4_rules_ops_template = { 248static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
249 .family = FIB_RULES_IPV4, 249 .family = AF_INET,
250 .rule_size = sizeof(struct fib4_rule), 250 .rule_size = sizeof(struct fib4_rule),
251 .addr_size = sizeof(u32), 251 .addr_size = sizeof(u32),
252 .action = fib4_rule_action, 252 .action = fib4_rule_action,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 59a838795e3e..c98f115fb0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
209{ 209{
210 struct node *ret = tnode_get_child(tn, i); 210 struct node *ret = tnode_get_child(tn, i);
211 211
212 return rcu_dereference(ret); 212 return rcu_dereference_check(ret,
213 rcu_read_lock_held() ||
214 lockdep_rtnl_is_held());
213} 215}
214 216
215static inline int tnode_child_length(const struct tnode *tn) 217static inline int tnode_child_length(const struct tnode *tn)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8da6429269dd..e0a3e3537b14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
234 * having to remove and re-insert us on the wait queue. 234 * having to remove and re-insert us on the wait queue.
235 */ 235 */
236 for (;;) { 236 for (;;) {
237 prepare_to_wait_exclusive(sk->sk_sleep, &wait, 237 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
238 TASK_INTERRUPTIBLE); 238 TASK_INTERRUPTIBLE);
239 release_sock(sk); 239 release_sock(sk);
240 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) 240 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
253 if (!timeo) 253 if (!timeo)
254 break; 254 break;
255 } 255 }
256 finish_wait(sk->sk_sleep, &wait); 256 finish_wait(sk_sleep(sk), &wait);
257 return err; 257 return err;
258} 258}
259 259
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d979710684b2..252897443ef9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
120 newskb->pkt_type = PACKET_LOOPBACK; 120 newskb->pkt_type = PACKET_LOOPBACK;
121 newskb->ip_summed = CHECKSUM_UNNECESSARY; 121 newskb->ip_summed = CHECKSUM_UNNECESSARY;
122 WARN_ON(!skb_dst(newskb)); 122 WARN_ON(!skb_dst(newskb));
123 netif_rx(newskb); 123 netif_rx_ni(newskb);
124 return 0; 124 return 0;
125} 125}
126 126
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index b0aa0546a3b3..ce231780a2b1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -954,6 +954,22 @@ e_inval:
954 return -EINVAL; 954 return -EINVAL;
955} 955}
956 956
957/**
958 * ip_queue_rcv_skb - Queue an skb into sock receive queue
959 * @sk: socket
960 * @skb: buffer
961 *
962 * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
963 * is not set, we drop skb dst entry now, while dst cache line is hot.
964 */
965int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
966{
967 if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
968 skb_dst_drop(skb);
969 return sock_queue_rcv_skb(sk, skb);
970}
971EXPORT_SYMBOL(ip_queue_rcv_skb);
972
957int ip_setsockopt(struct sock *sk, int level, 973int ip_setsockopt(struct sock *sk, int level,
958 int optname, char __user *optval, unsigned int optlen) 974 int optname, char __user *optval, unsigned int optlen)
959{ 975{
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1aa498d7a0a5..f3f1c6b5c70c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -128,8 +128,8 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
128 int local); 128 int local);
129static int ipmr_cache_report(struct mr_table *mrt, 129static int ipmr_cache_report(struct mr_table *mrt,
130 struct sk_buff *pkt, vifi_t vifi, int assert); 130 struct sk_buff *pkt, vifi_t vifi, int assert);
131static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 131static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
132 struct mfc_cache *c, struct rtmsg *rtm); 132 struct mfc_cache *c, struct rtmsg *rtm);
133static void ipmr_expire_process(unsigned long arg); 133static void ipmr_expire_process(unsigned long arg);
134 134
135#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 135#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -216,8 +216,8 @@ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
216 return 0; 216 return 0;
217} 217}
218 218
219static struct fib_rules_ops ipmr_rules_ops_template = { 219static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
220 .family = FIB_RULES_IPMR, 220 .family = RTNL_FAMILY_IPMR,
221 .rule_size = sizeof(struct ipmr_rule), 221 .rule_size = sizeof(struct ipmr_rule),
222 .addr_size = sizeof(u32), 222 .addr_size = sizeof(u32),
223 .action = ipmr_rule_action, 223 .action = ipmr_rule_action,
@@ -831,7 +831,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
831 if (ip_hdr(skb)->version == 0) { 831 if (ip_hdr(skb)->version == 0) {
832 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 832 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
833 833
834 if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 834 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
835 nlh->nlmsg_len = (skb_tail_pointer(skb) - 835 nlh->nlmsg_len = (skb_tail_pointer(skb) -
836 (u8 *)nlh); 836 (u8 *)nlh);
837 } else { 837 } else {
@@ -1772,10 +1772,10 @@ int ip_mr_input(struct sk_buff *skb)
1772 1772
1773 vif = ipmr_find_vif(mrt, skb->dev); 1773 vif = ipmr_find_vif(mrt, skb->dev);
1774 if (vif >= 0) { 1774 if (vif >= 0) {
1775 int err = ipmr_cache_unresolved(mrt, vif, skb); 1775 int err2 = ipmr_cache_unresolved(mrt, vif, skb);
1776 read_unlock(&mrt_lock); 1776 read_unlock(&mrt_lock);
1777 1777
1778 return err; 1778 return err2;
1779 } 1779 }
1780 read_unlock(&mrt_lock); 1780 read_unlock(&mrt_lock);
1781 kfree_skb(skb); 1781 kfree_skb(skb);
@@ -1904,9 +1904,8 @@ drop:
1904} 1904}
1905#endif 1905#endif
1906 1906
1907static int 1907static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
1908ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, 1908 struct mfc_cache *c, struct rtmsg *rtm)
1909 struct rtmsg *rtm)
1910{ 1909{
1911 int ct; 1910 int ct;
1912 struct rtnexthop *nhp; 1911 struct rtnexthop *nhp;
@@ -1994,11 +1993,93 @@ int ipmr_get_route(struct net *net,
1994 1993
1995 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) 1994 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1996 cache->mfc_flags |= MFC_NOTIFY; 1995 cache->mfc_flags |= MFC_NOTIFY;
1997 err = ipmr_fill_mroute(mrt, skb, cache, rtm); 1996 err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
1998 read_unlock(&mrt_lock); 1997 read_unlock(&mrt_lock);
1999 return err; 1998 return err;
2000} 1999}
2001 2000
2001static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2002 u32 pid, u32 seq, struct mfc_cache *c)
2003{
2004 struct nlmsghdr *nlh;
2005 struct rtmsg *rtm;
2006
2007 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2008 if (nlh == NULL)
2009 return -EMSGSIZE;
2010
2011 rtm = nlmsg_data(nlh);
2012 rtm->rtm_family = RTNL_FAMILY_IPMR;
2013 rtm->rtm_dst_len = 32;
2014 rtm->rtm_src_len = 32;
2015 rtm->rtm_tos = 0;
2016 rtm->rtm_table = mrt->id;
2017 NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
2018 rtm->rtm_type = RTN_MULTICAST;
2019 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2020 rtm->rtm_protocol = RTPROT_UNSPEC;
2021 rtm->rtm_flags = 0;
2022
2023 NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
2024 NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
2025
2026 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
2027 goto nla_put_failure;
2028
2029 return nlmsg_end(skb, nlh);
2030
2031nla_put_failure:
2032 nlmsg_cancel(skb, nlh);
2033 return -EMSGSIZE;
2034}
2035
2036static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2037{
2038 struct net *net = sock_net(skb->sk);
2039 struct mr_table *mrt;
2040 struct mfc_cache *mfc;
2041 unsigned int t = 0, s_t;
2042 unsigned int h = 0, s_h;
2043 unsigned int e = 0, s_e;
2044
2045 s_t = cb->args[0];
2046 s_h = cb->args[1];
2047 s_e = cb->args[2];
2048
2049 read_lock(&mrt_lock);
2050 ipmr_for_each_table(mrt, net) {
2051 if (t < s_t)
2052 goto next_table;
2053 if (t > s_t)
2054 s_h = 0;
2055 for (h = s_h; h < MFC_LINES; h++) {
2056 list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
2057 if (e < s_e)
2058 goto next_entry;
2059 if (ipmr_fill_mroute(mrt, skb,
2060 NETLINK_CB(cb->skb).pid,
2061 cb->nlh->nlmsg_seq,
2062 mfc) < 0)
2063 goto done;
2064next_entry:
2065 e++;
2066 }
2067 e = s_e = 0;
2068 }
2069 s_h = 0;
2070next_table:
2071 t++;
2072 }
2073done:
2074 read_unlock(&mrt_lock);
2075
2076 cb->args[2] = e;
2077 cb->args[1] = h;
2078 cb->args[0] = t;
2079
2080 return skb->len;
2081}
2082
2002#ifdef CONFIG_PROC_FS 2083#ifdef CONFIG_PROC_FS
2003/* 2084/*
2004 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif 2085 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
@@ -2227,9 +2308,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2227 const struct ipmr_mfc_iter *it = seq->private; 2308 const struct ipmr_mfc_iter *it = seq->private;
2228 const struct mr_table *mrt = it->mrt; 2309 const struct mr_table *mrt = it->mrt;
2229 2310
2230 seq_printf(seq, "%08lX %08lX %-3hd", 2311 seq_printf(seq, "%08X %08X %-3hd",
2231 (unsigned long) mfc->mfc_mcastgrp, 2312 (__force u32) mfc->mfc_mcastgrp,
2232 (unsigned long) mfc->mfc_origin, 2313 (__force u32) mfc->mfc_origin,
2233 mfc->mfc_parent); 2314 mfc->mfc_parent);
2234 2315
2235 if (it->cache != &mrt->mfc_unres_queue) { 2316 if (it->cache != &mrt->mfc_unres_queue) {
@@ -2355,6 +2436,7 @@ int __init ip_mr_init(void)
2355 goto add_proto_fail; 2436 goto add_proto_fail;
2356 } 2437 }
2357#endif 2438#endif
2439 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
2358 return 0; 2440 return 0;
2359 2441
2360#ifdef CONFIG_IP_PIMSM_V2 2442#ifdef CONFIG_IP_PIMSM_V2
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbda0d5f9244..2c7a1639388a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -290,7 +290,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
290{ 290{
291 /* Charge it to the socket. */ 291 /* Charge it to the socket. */
292 292
293 if (sock_queue_rcv_skb(sk, skb) < 0) { 293 if (ip_queue_rcv_skb(sk, skb) < 0) {
294 kfree_skb(skb); 294 kfree_skb(skb);
295 return NET_RX_DROP; 295 return NET_RX_DROP;
296 } 296 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb562fdd9b9a..dea3f9264250 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
129static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 129static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
130static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 130static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
131static int ip_rt_min_advmss __read_mostly = 256; 131static int ip_rt_min_advmss __read_mostly = 256;
132static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
133static int rt_chain_length_max __read_mostly = 20; 132static int rt_chain_length_max __read_mostly = 20;
134 133
135static struct delayed_work expires_work; 134static struct delayed_work expires_work;
@@ -258,10 +257,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
258 (__raw_get_cpu_var(rt_cache_stat).field++) 257 (__raw_get_cpu_var(rt_cache_stat).field++)
259 258
260static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, 259static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
261 int genid) 260 int genid)
262{ 261{
263 return jhash_3words((__force u32)(__be32)(daddr), 262 return jhash_3words((__force u32)daddr, (__force u32)saddr,
264 (__force u32)(__be32)(saddr),
265 idx, genid) 263 idx, genid)
266 & rt_hash_mask; 264 & rt_hash_mask;
267} 265}
@@ -378,12 +376,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
378 struct rtable *r = v; 376 struct rtable *r = v;
379 int len; 377 int len;
380 378
381 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" 379 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
382 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", 380 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
383 r->u.dst.dev ? r->u.dst.dev->name : "*", 381 r->u.dst.dev ? r->u.dst.dev->name : "*",
384 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, 382 (__force u32)r->rt_dst,
383 (__force u32)r->rt_gateway,
385 r->rt_flags, atomic_read(&r->u.dst.__refcnt), 384 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
386 r->u.dst.__use, 0, (unsigned long)r->rt_src, 385 r->u.dst.__use, 0, (__force u32)r->rt_src,
387 (dst_metric(&r->u.dst, RTAX_ADVMSS) ? 386 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
388 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), 387 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
389 dst_metric(&r->u.dst, RTAX_WINDOW), 388 dst_metric(&r->u.dst, RTAX_WINDOW),
@@ -685,18 +684,17 @@ static inline bool rt_caching(const struct net *net)
685static inline bool compare_hash_inputs(const struct flowi *fl1, 684static inline bool compare_hash_inputs(const struct flowi *fl1,
686 const struct flowi *fl2) 685 const struct flowi *fl2)
687{ 686{
688 return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 687 return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
689 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | 688 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
690 (fl1->iif ^ fl2->iif)) == 0); 689 (fl1->iif ^ fl2->iif)) == 0);
691} 690}
692 691
693static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 692static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
694{ 693{
695 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 694 return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
696 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | 695 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
697 (fl1->mark ^ fl2->mark) | 696 (fl1->mark ^ fl2->mark) |
698 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ 697 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
699 *(u16 *)&fl2->nl_u.ip4_u.tos) |
700 (fl1->oif ^ fl2->oif) | 698 (fl1->oif ^ fl2->oif) |
701 (fl1->iif ^ fl2->iif)) == 0; 699 (fl1->iif ^ fl2->iif)) == 0;
702} 700}
@@ -919,32 +917,11 @@ void rt_cache_flush_batch(void)
919 rt_do_flush(!in_softirq()); 917 rt_do_flush(!in_softirq());
920} 918}
921 919
922/*
923 * We change rt_genid and let gc do the cleanup
924 */
925static void rt_secret_rebuild(unsigned long __net)
926{
927 struct net *net = (struct net *)__net;
928 rt_cache_invalidate(net);
929 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
930}
931
932static void rt_secret_rebuild_oneshot(struct net *net)
933{
934 del_timer_sync(&net->ipv4.rt_secret_timer);
935 rt_cache_invalidate(net);
936 if (ip_rt_secret_interval)
937 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
938}
939
940static void rt_emergency_hash_rebuild(struct net *net) 920static void rt_emergency_hash_rebuild(struct net *net)
941{ 921{
942 if (net_ratelimit()) { 922 if (net_ratelimit())
943 printk(KERN_WARNING "Route hash chain too long!\n"); 923 printk(KERN_WARNING "Route hash chain too long!\n");
944 printk(KERN_WARNING "Adjust your secret_interval!\n"); 924 rt_cache_invalidate(net);
945 }
946
947 rt_secret_rebuild_oneshot(net);
948} 925}
949 926
950/* 927/*
@@ -2319,8 +2296,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2319 rcu_read_lock(); 2296 rcu_read_lock();
2320 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2297 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2321 rth = rcu_dereference(rth->u.dst.rt_next)) { 2298 rth = rcu_dereference(rth->u.dst.rt_next)) {
2322 if (((rth->fl.fl4_dst ^ daddr) | 2299 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2323 (rth->fl.fl4_src ^ saddr) | 2300 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2324 (rth->fl.iif ^ iif) | 2301 (rth->fl.iif ^ iif) |
2325 rth->fl.oif | 2302 rth->fl.oif |
2326 (rth->fl.fl4_tos ^ tos)) == 0 && 2303 (rth->fl.fl4_tos ^ tos)) == 0 &&
@@ -3102,48 +3079,6 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3102 return -EINVAL; 3079 return -EINVAL;
3103} 3080}
3104 3081
3105static void rt_secret_reschedule(int old)
3106{
3107 struct net *net;
3108 int new = ip_rt_secret_interval;
3109 int diff = new - old;
3110
3111 if (!diff)
3112 return;
3113
3114 rtnl_lock();
3115 for_each_net(net) {
3116 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
3117 long time;
3118
3119 if (!new)
3120 continue;
3121
3122 if (deleted) {
3123 time = net->ipv4.rt_secret_timer.expires - jiffies;
3124
3125 if (time <= 0 || (time += diff) <= 0)
3126 time = 0;
3127 } else
3128 time = new;
3129
3130 mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
3131 }
3132 rtnl_unlock();
3133}
3134
3135static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
3136 void __user *buffer, size_t *lenp,
3137 loff_t *ppos)
3138{
3139 int old = ip_rt_secret_interval;
3140 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3141
3142 rt_secret_reschedule(old);
3143
3144 return ret;
3145}
3146
3147static ctl_table ipv4_route_table[] = { 3082static ctl_table ipv4_route_table[] = {
3148 { 3083 {
3149 .procname = "gc_thresh", 3084 .procname = "gc_thresh",
@@ -3252,13 +3187,6 @@ static ctl_table ipv4_route_table[] = {
3252 .mode = 0644, 3187 .mode = 0644,
3253 .proc_handler = proc_dointvec, 3188 .proc_handler = proc_dointvec,
3254 }, 3189 },
3255 {
3256 .procname = "secret_interval",
3257 .data = &ip_rt_secret_interval,
3258 .maxlen = sizeof(int),
3259 .mode = 0644,
3260 .proc_handler = ipv4_sysctl_rt_secret_interval,
3261 },
3262 { } 3190 { }
3263}; 3191};
3264 3192
@@ -3337,34 +3265,15 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
3337}; 3265};
3338#endif 3266#endif
3339 3267
3340 3268static __net_init int rt_genid_init(struct net *net)
3341static __net_init int rt_secret_timer_init(struct net *net)
3342{ 3269{
3343 atomic_set(&net->ipv4.rt_genid, 3270 get_random_bytes(&net->ipv4.rt_genid,
3344 (int) ((num_physpages ^ (num_physpages>>8)) ^ 3271 sizeof(net->ipv4.rt_genid));
3345 (jiffies ^ (jiffies >> 7))));
3346
3347 net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
3348 net->ipv4.rt_secret_timer.data = (unsigned long)net;
3349 init_timer_deferrable(&net->ipv4.rt_secret_timer);
3350
3351 if (ip_rt_secret_interval) {
3352 net->ipv4.rt_secret_timer.expires =
3353 jiffies + net_random() % ip_rt_secret_interval +
3354 ip_rt_secret_interval;
3355 add_timer(&net->ipv4.rt_secret_timer);
3356 }
3357 return 0; 3272 return 0;
3358} 3273}
3359 3274
3360static __net_exit void rt_secret_timer_exit(struct net *net) 3275static __net_initdata struct pernet_operations rt_genid_ops = {
3361{ 3276 .init = rt_genid_init,
3362 del_timer_sync(&net->ipv4.rt_secret_timer);
3363}
3364
3365static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3366 .init = rt_secret_timer_init,
3367 .exit = rt_secret_timer_exit,
3368}; 3277};
3369 3278
3370 3279
@@ -3425,9 +3334,6 @@ int __init ip_rt_init(void)
3425 schedule_delayed_work(&expires_work, 3334 schedule_delayed_work(&expires_work,
3426 net_random() % ip_rt_gc_interval + ip_rt_gc_interval); 3335 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3427 3336
3428 if (register_pernet_subsys(&rt_secret_timer_ops))
3429 printk(KERN_ERR "Unable to setup rt_secret_timer\n");
3430
3431 if (ip_rt_proc_init()) 3337 if (ip_rt_proc_init())
3432 printk(KERN_ERR "Unable to create route proc files\n"); 3338 printk(KERN_ERR "Unable to create route proc files\n");
3433#ifdef CONFIG_XFRM 3339#ifdef CONFIG_XFRM
@@ -3439,6 +3345,7 @@ int __init ip_rt_init(void)
3439#ifdef CONFIG_SYSCTL 3345#ifdef CONFIG_SYSCTL
3440 register_pernet_subsys(&sysctl_route_ops); 3346 register_pernet_subsys(&sysctl_route_ops);
3441#endif 3347#endif
3348 register_pernet_subsys(&rt_genid_ops);
3442 return rc; 3349 return rc;
3443} 3350}
3444 3351
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..8ce29747ad9b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
378 struct sock *sk = sock->sk; 378 struct sock *sk = sock->sk;
379 struct tcp_sock *tp = tcp_sk(sk); 379 struct tcp_sock *tp = tcp_sk(sk);
380 380
381 sock_poll_wait(file, sk->sk_sleep, wait); 381 sock_poll_wait(file, sk_sleep(sk), wait);
382 if (sk->sk_state == TCP_LISTEN) 382 if (sk->sk_state == TCP_LISTEN)
383 return inet_csk_listen_poll(sk); 383 return inet_csk_listen_poll(sk);
384 384
@@ -2298,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2298 if (sock_flag(sk, SOCK_KEEPOPEN) && 2298 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2299 !((1 << sk->sk_state) & 2299 !((1 << sk->sk_state) &
2300 (TCPF_CLOSE | TCPF_LISTEN))) { 2300 (TCPF_CLOSE | TCPF_LISTEN))) {
2301 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; 2301 u32 elapsed = keepalive_time_elapsed(tp);
2302 if (tp->keepalive_time > elapsed) 2302 if (tp->keepalive_time > elapsed)
2303 elapsed = tp->keepalive_time - elapsed; 2303 elapsed = tp->keepalive_time - elapsed;
2304 else 2304 else
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2721 struct tcphdr *th2; 2721 struct tcphdr *th2;
2722 unsigned int len; 2722 unsigned int len;
2723 unsigned int thlen; 2723 unsigned int thlen;
2724 unsigned int flags; 2724 __be32 flags;
2725 unsigned int mss = 1; 2725 unsigned int mss = 1;
2726 unsigned int hlen; 2726 unsigned int hlen;
2727 unsigned int off; 2727 unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2771 2771
2772found: 2772found:
2773 flush = NAPI_GRO_CB(p)->flush; 2773 flush = NAPI_GRO_CB(p)->flush;
2774 flush |= flags & TCP_FLAG_CWR; 2774 flush |= (__force int)(flags & TCP_FLAG_CWR);
2775 flush |= (flags ^ tcp_flag_word(th2)) & 2775 flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
2776 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2776 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
2777 flush |= th->ack_seq ^ th2->ack_seq; 2777 flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2778 for (i = sizeof(*th); i < thlen; i += 4) 2778 for (i = sizeof(*th); i < thlen; i += 4)
2779 flush |= *(u32 *)((u8 *)th + i) ^ 2779 flush |= *(u32 *)((u8 *)th + i) ^
2780 *(u32 *)((u8 *)th2 + i); 2780 *(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
2795 2795
2796out_check_final: 2796out_check_final:
2797 flush = len < mss; 2797 flush = len < mss;
2798 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | 2798 flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
2799 TCP_FLAG_SYN | TCP_FLAG_FIN); 2799 TCP_FLAG_RST | TCP_FLAG_SYN |
2800 TCP_FLAG_FIN));
2800 2801
2801 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) 2802 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2802 pp = head; 2803 pp = head;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ae3ec15fb630..e82162c211bf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4367,6 +4367,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4367 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4367 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
4368 goto drop; 4368 goto drop;
4369 4369
4370 skb_dst_drop(skb);
4370 __skb_pull(skb, th->doff * 4); 4371 __skb_pull(skb, th->doff * 4);
4371 4372
4372 TCP_ECN_accept_cwr(tp, skb); 4373 TCP_ECN_accept_cwr(tp, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad08392a738c..771f8146a2e5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1286,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1286 goto drop_and_release; 1286 goto drop_and_release;
1287 1287
1288 /* Secret recipe starts with IP addresses */ 1288 /* Secret recipe starts with IP addresses */
1289 *mess++ ^= daddr; 1289 *mess++ ^= (__force u32)daddr;
1290 *mess++ ^= saddr; 1290 *mess++ ^= (__force u32)saddr;
1291 1291
1292 /* plus variable length Initiator Cookie */ 1292 /* plus variable length Initiator Cookie */
1293 c = (u8 *)mess; 1293 c = (u8 *)mess;
@@ -1672,7 +1672,7 @@ process:
1672 1672
1673 skb->dev = NULL; 1673 skb->dev = NULL;
1674 1674
1675 inet_rps_save_rxhash(sk, skb->rxhash); 1675 sock_rps_save_rxhash(sk, skb->rxhash);
1676 1676
1677 bh_lock_sock_nested(sk); 1677 bh_lock_sock_nested(sk);
1678 ret = 0; 1678 ret = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2b7d71fb8439..5db3a2c6cb33 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -861,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
861 th->urg_ptr = htons(tp->snd_up - tcb->seq); 861 th->urg_ptr = htons(tp->snd_up - tcb->seq);
862 th->urg = 1; 862 th->urg = 1;
863 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 863 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
864 th->urg_ptr = 0xFFFF; 864 th->urg_ptr = htons(0xFFFF);
865 th->urg = 1; 865 th->urg = 1;
866 } 866 }
867 } 867 }
@@ -888,7 +888,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
888 tcp_event_data_sent(tp, skb, sk); 888 tcp_event_data_sent(tp, skb, sk);
889 889
890 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 890 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
891 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 891 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
892 tcp_skb_pcount(skb));
892 893
893 err = icsk->icsk_af_ops->queue_xmit(skb); 894 err = icsk->icsk_af_ops->queue_xmit(skb);
894 if (likely(err <= 0)) 895 if (likely(err <= 0))
@@ -2485,7 +2486,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2485 *tail-- ^= TCP_SKB_CB(skb)->seq + 1; 2486 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2486 2487
2487 /* recommended */ 2488 /* recommended */
2488 *tail-- ^= ((th->dest << 16) | th->source); 2489 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2489 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ 2490 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2490 2491
2491 sha_transform((__u32 *)&xvp->cookie_bakery[0], 2492 sha_transform((__u32 *)&xvp->cookie_bakery[0],
@@ -2503,7 +2504,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2503 th->window = htons(min(req->rcv_wnd, 65535U)); 2504 th->window = htons(min(req->rcv_wnd, 65535U));
2504 tcp_options_write((__be32 *)(th + 1), tp, &opts); 2505 tcp_options_write((__be32 *)(th + 1), tp, &opts);
2505 th->doff = (tcp_header_size >> 2); 2506 th->doff = (tcp_header_size >> 2);
2506 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 2507 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2507 2508
2508#ifdef CONFIG_TCP_MD5SIG 2509#ifdef CONFIG_TCP_MD5SIG
2509 /* Okay, we have all we need - do the md5 hash if needed */ 2510 /* Okay, we have all we need - do the md5 hash if needed */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c732be00606b..440a5c6004f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -517,7 +517,7 @@ static void tcp_keepalive_timer (unsigned long data)
517 struct sock *sk = (struct sock *) data; 517 struct sock *sk = (struct sock *) data;
518 struct inet_connection_sock *icsk = inet_csk(sk); 518 struct inet_connection_sock *icsk = inet_csk(sk);
519 struct tcp_sock *tp = tcp_sk(sk); 519 struct tcp_sock *tp = tcp_sk(sk);
520 __u32 elapsed; 520 u32 elapsed;
521 521
522 /* Only process if socket is not in use. */ 522 /* Only process if socket is not in use. */
523 bh_lock_sock(sk); 523 bh_lock_sock(sk);
@@ -554,7 +554,7 @@ static void tcp_keepalive_timer (unsigned long data)
554 if (tp->packets_out || tcp_send_head(sk)) 554 if (tp->packets_out || tcp_send_head(sk))
555 goto resched; 555 goto resched;
556 556
557 elapsed = tcp_time_stamp - tp->rcv_tstamp; 557 elapsed = keepalive_time_elapsed(tp);
558 558
559 if (elapsed >= keepalive_time_when(tp)) { 559 if (elapsed >= keepalive_time_when(tp)) {
560 if (icsk->icsk_probes_out >= keepalive_probes(tp)) { 560 if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 666b963496ff..4560b291180b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
307static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, 307static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
308 unsigned int port) 308 unsigned int port)
309{ 309{
310 return jhash_1word(saddr, net_hash_mix(net)) ^ port; 310 return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
311} 311}
312 312
313int udp_v4_get_port(struct sock *sk, unsigned short snum) 313int udp_v4_get_port(struct sock *sk, unsigned short snum)
314{ 314{
315 unsigned int hash2_nulladdr = 315 unsigned int hash2_nulladdr =
316 udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); 316 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
317 unsigned int hash2_partial = 317 unsigned int hash2_partial =
318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); 318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
319 319
@@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
466 daddr, hnum, dif, 466 daddr, hnum, dif,
467 hslot2, slot2); 467 hslot2, slot2);
468 if (!result) { 468 if (!result) {
469 hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); 469 hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
470 slot2 = hash2 & udptable->mask; 470 slot2 = hash2 & udptable->mask;
471 hslot2 = &udptable->hash2[slot2]; 471 hslot2 = &udptable->hash2[slot2];
472 if (hslot->count < hslot2->count) 472 if (hslot->count < hslot2->count)
473 goto begin; 473 goto begin;
474 474
475 result = udp4_lib_lookup2(net, saddr, sport, 475 result = udp4_lib_lookup2(net, saddr, sport,
476 INADDR_ANY, hnum, dif, 476 htonl(INADDR_ANY), hnum, dif,
477 hslot2, slot2); 477 hslot2, slot2);
478 } 478 }
479 rcu_read_unlock(); 479 rcu_read_unlock();
@@ -1062,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk)
1062 spin_unlock_bh(&rcvq->lock); 1062 spin_unlock_bh(&rcvq->lock);
1063 1063
1064 if (!skb_queue_empty(&list_kill)) { 1064 if (!skb_queue_empty(&list_kill)) {
1065 lock_sock(sk); 1065 lock_sock_bh(sk);
1066 __skb_queue_purge(&list_kill); 1066 __skb_queue_purge(&list_kill);
1067 sk_mem_reclaim_partial(sk); 1067 sk_mem_reclaim_partial(sk);
1068 release_sock(sk); 1068 unlock_sock_bh(sk);
1069 } 1069 }
1070 return res; 1070 return res;
1071} 1071}
@@ -1196,10 +1196,10 @@ out:
1196 return err; 1196 return err;
1197 1197
1198csum_copy_err: 1198csum_copy_err:
1199 lock_sock(sk); 1199 lock_sock_bh(sk);
1200 if (!skb_kill_datagram(sk, skb, flags)) 1200 if (!skb_kill_datagram(sk, skb, flags))
1201 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 1201 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1202 release_sock(sk); 1202 unlock_sock_bh(sk);
1203 1203
1204 if (noblock) 1204 if (noblock)
1205 return -EAGAIN; 1205 return -EAGAIN;
@@ -1217,7 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
1217 sk->sk_state = TCP_CLOSE; 1217 sk->sk_state = TCP_CLOSE;
1218 inet->inet_daddr = 0; 1218 inet->inet_daddr = 0;
1219 inet->inet_dport = 0; 1219 inet->inet_dport = 0;
1220 inet_rps_save_rxhash(sk, 0); 1220 sock_rps_save_rxhash(sk, 0);
1221 sk->sk_bound_dev_if = 0; 1221 sk->sk_bound_dev_if = 0;
1222 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1222 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1223 inet_reset_saddr(sk); 1223 inet_reset_saddr(sk);
@@ -1262,9 +1262,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1262 int rc; 1262 int rc;
1263 1263
1264 if (inet_sk(sk)->inet_daddr) 1264 if (inet_sk(sk)->inet_daddr)
1265 inet_rps_save_rxhash(sk, skb->rxhash); 1265 sock_rps_save_rxhash(sk, skb->rxhash);
1266 1266
1267 rc = sock_queue_rcv_skb(sk, skb); 1267 rc = ip_queue_rcv_skb(sk, skb);
1268 if (rc < 0) { 1268 if (rc < 0) {
1269 int is_udplite = IS_UDPLITE(sk); 1269 int is_udplite = IS_UDPLITE(sk);
1270 1270
@@ -1372,6 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1372 goto drop; 1372 goto drop;
1373 } 1373 }
1374 1374
1375
1376 if (sk_rcvqueues_full(sk, skb))
1377 goto drop;
1378
1375 rc = 0; 1379 rc = 0;
1376 1380
1377 bh_lock_sock(sk); 1381 bh_lock_sock(sk);
@@ -1620,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb)
1620 1624
1621void udp_destroy_sock(struct sock *sk) 1625void udp_destroy_sock(struct sock *sk)
1622{ 1626{
1623 lock_sock(sk); 1627 lock_sock_bh(sk);
1624 udp_flush_pending_frames(sk); 1628 udp_flush_pending_frames(sk);
1625 release_sock(sk); 1629 unlock_sock_bh(sk);
1626} 1630}
1627 1631
1628/* 1632/*
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7cba8845242f..3984f52181f4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -588,7 +588,8 @@ static u32 ipv6_addr_hash(const struct in6_addr *addr)
588 * We perform the hash function over the last 64 bits of the address 588 * We perform the hash function over the last 64 bits of the address
589 * This will include the IEEE address token on links that support it. 589 * This will include the IEEE address token on links that support it.
590 */ 590 */
591 return jhash_2words(addr->s6_addr32[2], addr->s6_addr32[3], 0) 591 return jhash_2words((__force u32)addr->s6_addr32[2],
592 (__force u32)addr->s6_addr32[3], 0)
592 & (IN6_ADDR_HSIZE - 1); 593 & (IN6_ADDR_HSIZE - 1);
593} 594}
594 595
@@ -1345,7 +1346,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
1345 struct hlist_node *node; 1346 struct hlist_node *node;
1346 1347
1347 rcu_read_lock_bh(); 1348 rcu_read_lock_bh();
1348 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { 1349 hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1349 if (!net_eq(dev_net(ifp->idev->dev), net)) 1350 if (!net_eq(dev_net(ifp->idev->dev), net))
1350 continue; 1351 continue;
1351 if (ipv6_addr_equal(&ifp->addr, addr)) { 1352 if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2958,7 +2959,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
2958 2959
2959 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 2960 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
2960 struct hlist_node *n; 2961 struct hlist_node *n;
2961 hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket], 2962 hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
2962 addr_lst) 2963 addr_lst)
2963 if (net_eq(dev_net(ifa->idev->dev), net)) 2964 if (net_eq(dev_net(ifa->idev->dev), net))
2964 return ifa; 2965 return ifa;
@@ -2973,12 +2974,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
2973 struct net *net = seq_file_net(seq); 2974 struct net *net = seq_file_net(seq);
2974 struct hlist_node *n = &ifa->addr_lst; 2975 struct hlist_node *n = &ifa->addr_lst;
2975 2976
2976 hlist_for_each_entry_continue_rcu(ifa, n, addr_lst) 2977 hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
2977 if (net_eq(dev_net(ifa->idev->dev), net)) 2978 if (net_eq(dev_net(ifa->idev->dev), net))
2978 return ifa; 2979 return ifa;
2979 2980
2980 while (++state->bucket < IN6_ADDR_HSIZE) { 2981 while (++state->bucket < IN6_ADDR_HSIZE) {
2981 hlist_for_each_entry(ifa, n, 2982 hlist_for_each_entry_rcu_bh(ifa, n,
2982 &inet6_addr_lst[state->bucket], addr_lst) { 2983 &inet6_addr_lst[state->bucket], addr_lst) {
2983 if (net_eq(dev_net(ifa->idev->dev), net)) 2984 if (net_eq(dev_net(ifa->idev->dev), net))
2984 return ifa; 2985 return ifa;
@@ -2999,7 +3000,7 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
2999} 3000}
3000 3001
3001static void *if6_seq_start(struct seq_file *seq, loff_t *pos) 3002static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
3002 __acquires(rcu) 3003 __acquires(rcu_bh)
3003{ 3004{
3004 rcu_read_lock_bh(); 3005 rcu_read_lock_bh();
3005 return if6_get_idx(seq, *pos); 3006 return if6_get_idx(seq, *pos);
@@ -3015,7 +3016,7 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3015} 3016}
3016 3017
3017static void if6_seq_stop(struct seq_file *seq, void *v) 3018static void if6_seq_stop(struct seq_file *seq, void *v)
3018 __releases(rcu) 3019 __releases(rcu_bh)
3019{ 3020{
3020 rcu_read_unlock_bh(); 3021 rcu_read_unlock_bh();
3021} 3022}
@@ -3092,7 +3093,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3092 unsigned int hash = ipv6_addr_hash(addr); 3093 unsigned int hash = ipv6_addr_hash(addr);
3093 3094
3094 rcu_read_lock_bh(); 3095 rcu_read_lock_bh();
3095 hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) { 3096 hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
3096 if (!net_eq(dev_net(ifp->idev->dev), net)) 3097 if (!net_eq(dev_net(ifp->idev->dev), net))
3097 continue; 3098 continue;
3098 if (ipv6_addr_equal(&ifp->addr, addr) && 3099 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3126,7 +3127,7 @@ static void addrconf_verify(unsigned long foo)
3126 3127
3127 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 3128 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3128restart: 3129restart:
3129 hlist_for_each_entry_rcu(ifp, node, 3130 hlist_for_each_entry_rcu_bh(ifp, node,
3130 &inet6_addr_lst[i], addr_lst) { 3131 &inet6_addr_lst[i], addr_lst) {
3131 unsigned long age; 3132 unsigned long age;
3132 3133
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3192aa02ba5d..d2df3144429b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -417,6 +417,9 @@ void inet6_destroy_sock(struct sock *sk)
417 if ((skb = xchg(&np->pktoptions, NULL)) != NULL) 417 if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
418 kfree_skb(skb); 418 kfree_skb(skb);
419 419
420 if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
421 kfree_skb(skb);
422
420 /* Free flowlabels */ 423 /* Free flowlabels */
421 fl6_free_socklist(sk); 424 fl6_free_socklist(sk);
422 425
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 622dc7939a1b..5959230bc6c1 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -278,6 +278,45 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
278 kfree_skb(skb); 278 kfree_skb(skb);
279} 279}
280 280
281void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu)
282{
283 struct ipv6_pinfo *np = inet6_sk(sk);
284 struct ipv6hdr *iph;
285 struct sk_buff *skb;
286 struct ip6_mtuinfo *mtu_info;
287
288 if (!np->rxopt.bits.rxpmtu)
289 return;
290
291 skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
292 if (!skb)
293 return;
294
295 skb_put(skb, sizeof(struct ipv6hdr));
296 skb_reset_network_header(skb);
297 iph = ipv6_hdr(skb);
298 ipv6_addr_copy(&iph->daddr, &fl->fl6_dst);
299
300 mtu_info = IP6CBMTU(skb);
301 if (!mtu_info) {
302 kfree_skb(skb);
303 return;
304 }
305
306 mtu_info->ip6m_mtu = mtu;
307 mtu_info->ip6m_addr.sin6_family = AF_INET6;
308 mtu_info->ip6m_addr.sin6_port = 0;
309 mtu_info->ip6m_addr.sin6_flowinfo = 0;
310 mtu_info->ip6m_addr.sin6_scope_id = fl->oif;
311 ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
312
313 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
314 skb_reset_transport_header(skb);
315
316 skb = xchg(&np->rxpmtu, skb);
317 kfree_skb(skb);
318}
319
281/* 320/*
282 * Handle MSG_ERRQUEUE 321 * Handle MSG_ERRQUEUE
283 */ 322 */
@@ -381,6 +420,54 @@ out:
381 return err; 420 return err;
382} 421}
383 422
423/*
424 * Handle IPV6_RECVPATHMTU
425 */
426int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
427{
428 struct ipv6_pinfo *np = inet6_sk(sk);
429 struct sk_buff *skb;
430 struct sockaddr_in6 *sin;
431 struct ip6_mtuinfo mtu_info;
432 int err;
433 int copied;
434
435 err = -EAGAIN;
436 skb = xchg(&np->rxpmtu, NULL);
437 if (skb == NULL)
438 goto out;
439
440 copied = skb->len;
441 if (copied > len) {
442 msg->msg_flags |= MSG_TRUNC;
443 copied = len;
444 }
445 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
446 if (err)
447 goto out_free_skb;
448
449 sock_recv_timestamp(msg, sk, skb);
450
451 memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info));
452
453 sin = (struct sockaddr_in6 *)msg->msg_name;
454 if (sin) {
455 sin->sin6_family = AF_INET6;
456 sin->sin6_flowinfo = 0;
457 sin->sin6_port = 0;
458 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
459 ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
460 }
461
462 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
463
464 err = copied;
465
466out_free_skb:
467 kfree_skb(skb);
468out:
469 return err;
470}
384 471
385 472
386int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 473int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
@@ -497,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
497int datagram_send_ctl(struct net *net, 584int datagram_send_ctl(struct net *net,
498 struct msghdr *msg, struct flowi *fl, 585 struct msghdr *msg, struct flowi *fl,
499 struct ipv6_txoptions *opt, 586 struct ipv6_txoptions *opt,
500 int *hlimit, int *tclass) 587 int *hlimit, int *tclass, int *dontfrag)
501{ 588{
502 struct in6_pktinfo *src_info; 589 struct in6_pktinfo *src_info;
503 struct cmsghdr *cmsg; 590 struct cmsghdr *cmsg;
@@ -737,6 +824,25 @@ int datagram_send_ctl(struct net *net,
737 824
738 break; 825 break;
739 } 826 }
827
828 case IPV6_DONTFRAG:
829 {
830 int df;
831
832 err = -EINVAL;
833 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
834 goto exit_f;
835 }
836
837 df = *(int *)CMSG_DATA(cmsg);
838 if (df < 0 || df > 1)
839 goto exit_f;
840
841 err = 0;
842 *dontfrag = df;
843
844 break;
845 }
740 default: 846 default:
741 LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", 847 LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
742 cmsg->cmsg_type); 848 cmsg->cmsg_type);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8124f16f2ac2..8e44f8f9c188 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -237,8 +237,8 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
237 + nla_total_size(16); /* src */ 237 + nla_total_size(16); /* src */
238} 238}
239 239
240static struct fib_rules_ops fib6_rules_ops_template = { 240static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = {
241 .family = FIB_RULES_IPV6, 241 .family = AF_INET6,
242 .rule_size = sizeof(struct fib6_rule), 242 .rule_size = sizeof(struct fib6_rule),
243 .addr_size = sizeof(struct in6_addr), 243 .addr_size = sizeof(struct in6_addr),
244 .action = fib6_rule_action, 244 .action = fib6_rule_action,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 12d2fa42657d..ce7992982557 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -481,7 +481,7 @@ route_done:
481 len + sizeof(struct icmp6hdr), 481 len + sizeof(struct icmp6hdr),
482 sizeof(struct icmp6hdr), hlimit, 482 sizeof(struct icmp6hdr), hlimit,
483 np->tclass, NULL, &fl, (struct rt6_info*)dst, 483 np->tclass, NULL, &fl, (struct rt6_info*)dst,
484 MSG_DONTWAIT); 484 MSG_DONTWAIT, np->dontfrag);
485 if (err) { 485 if (err) {
486 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); 486 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
487 ip6_flush_pending_frames(sk); 487 ip6_flush_pending_frames(sk);
@@ -561,7 +561,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
561 561
562 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 562 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
563 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, 563 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
564 (struct rt6_info*)dst, MSG_DONTWAIT); 564 (struct rt6_info*)dst, MSG_DONTWAIT,
565 np->dontfrag);
565 566
566 if (err) { 567 if (err) {
567 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); 568 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index dc6e0b8f260d..92a122b7795d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -144,7 +144,8 @@ static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
144 * htonl(1 << ((~fn_bit)&0x1F)) 144 * htonl(1 << ((~fn_bit)&0x1F))
145 * See include/asm-generic/bitops/le.h. 145 * See include/asm-generic/bitops/le.h.
146 */ 146 */
147 return (1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5]; 147 return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
148 addr[fn_bit >> 5];
148} 149}
149 150
150static __inline__ struct fib6_node * node_alloc(void) 151static __inline__ struct fib6_node * node_alloc(void)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 14e23216eb28..13654686aeab 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -360,7 +360,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
360 msg.msg_control = (void*)(fl->opt+1); 360 msg.msg_control = (void*)(fl->opt+1);
361 flowi.oif = 0; 361 flowi.oif = 0;
362 362
363 err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk); 363 err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk,
364 &junk, &junk);
364 if (err) 365 if (err)
365 goto done; 366 goto done;
366 err = -EINVAL; 367 err = -EINVAL;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7f12e30cfa73..5173acaeb501 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -92,7 +92,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
92 newskb->ip_summed = CHECKSUM_UNNECESSARY; 92 newskb->ip_summed = CHECKSUM_UNNECESSARY;
93 WARN_ON(!skb_dst(newskb)); 93 WARN_ON(!skb_dst(newskb));
94 94
95 netif_rx(newskb); 95 netif_rx_ni(newskb);
96 return 0; 96 return 0;
97} 97}
98 98
@@ -216,8 +216,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
216 } 216 }
217 kfree_skb(skb); 217 kfree_skb(skb);
218 skb = skb2; 218 skb = skb2;
219 if (sk) 219 skb_set_owner_w(skb, sk);
220 skb_set_owner_w(skb, sk);
221 } 220 }
222 if (opt->opt_flen) 221 if (opt->opt_flen)
223 ipv6_push_frag_opts(skb, opt, &proto); 222 ipv6_push_frag_opts(skb, opt, &proto);
@@ -623,7 +622,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
623 /* We must not fragment if the socket is set to force MTU discovery 622 /* We must not fragment if the socket is set to force MTU discovery
624 * or if the skb it not generated by a local socket. 623 * or if the skb it not generated by a local socket.
625 */ 624 */
626 if (!skb->local_df) { 625 if (!skb->local_df && skb->len > mtu) {
627 skb->dev = skb_dst(skb)->dev; 626 skb->dev = skb_dst(skb)->dev;
628 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 627 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
629 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 628 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -1103,7 +1102,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1103 int offset, int len, int odd, struct sk_buff *skb), 1102 int offset, int len, int odd, struct sk_buff *skb),
1104 void *from, int length, int transhdrlen, 1103 void *from, int length, int transhdrlen,
1105 int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, 1104 int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
1106 struct rt6_info *rt, unsigned int flags) 1105 struct rt6_info *rt, unsigned int flags, int dontfrag)
1107{ 1106{
1108 struct inet_sock *inet = inet_sk(sk); 1107 struct inet_sock *inet = inet_sk(sk);
1109 struct ipv6_pinfo *np = inet6_sk(sk); 1108 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1217,15 +1216,23 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1217 */ 1216 */
1218 1217
1219 inet->cork.length += length; 1218 inet->cork.length += length;
1220 if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && 1219 if (length > mtu) {
1221 (rt->u.dst.dev->features & NETIF_F_UFO)) { 1220 int proto = sk->sk_protocol;
1221 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
1222 ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen);
1223 return -EMSGSIZE;
1224 }
1222 1225
1223 err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, 1226 if (proto == IPPROTO_UDP &&
1224 fragheaderlen, transhdrlen, mtu, 1227 (rt->u.dst.dev->features & NETIF_F_UFO)) {
1225 flags); 1228
1226 if (err) 1229 err = ip6_ufo_append_data(sk, getfrag, from, length,
1227 goto error; 1230 hh_len, fragheaderlen,
1228 return 0; 1231 transhdrlen, mtu, flags);
1232 if (err)
1233 goto error;
1234 return 0;
1235 }
1229 } 1236 }
1230 1237
1231 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 1238 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 1160400e9dbd..bd43f0152c21 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -337,6 +337,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
337 retv = 0; 337 retv = 0;
338 break; 338 break;
339 339
340 case IPV6_RECVPATHMTU:
341 if (optlen < sizeof(int))
342 goto e_inval;
343 np->rxopt.bits.rxpmtu = valbool;
344 retv = 0;
345 break;
346
340 case IPV6_HOPOPTS: 347 case IPV6_HOPOPTS:
341 case IPV6_RTHDRDSTOPTS: 348 case IPV6_RTHDRDSTOPTS:
342 case IPV6_RTHDR: 349 case IPV6_RTHDR:
@@ -451,7 +458,8 @@ sticky_done:
451 msg.msg_controllen = optlen; 458 msg.msg_controllen = optlen;
452 msg.msg_control = (void*)(opt+1); 459 msg.msg_control = (void*)(opt+1);
453 460
454 retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk); 461 retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk,
462 &junk);
455 if (retv) 463 if (retv)
456 goto done; 464 goto done;
457update: 465update:
@@ -767,6 +775,17 @@ pref_skip_coa:
767 775
768 break; 776 break;
769 } 777 }
778 case IPV6_MINHOPCOUNT:
779 if (optlen < sizeof(int))
780 goto e_inval;
781 if (val < 0 || val > 255)
782 goto e_inval;
783 np->min_hopcount = val;
784 break;
785 case IPV6_DONTFRAG:
786 np->dontfrag = valbool;
787 retv = 0;
788 break;
770 } 789 }
771 790
772 release_sock(sk); 791 release_sock(sk);
@@ -1055,6 +1074,38 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1055 val = np->rxopt.bits.rxflow; 1074 val = np->rxopt.bits.rxflow;
1056 break; 1075 break;
1057 1076
1077 case IPV6_RECVPATHMTU:
1078 val = np->rxopt.bits.rxpmtu;
1079 break;
1080
1081 case IPV6_PATHMTU:
1082 {
1083 struct dst_entry *dst;
1084 struct ip6_mtuinfo mtuinfo;
1085
1086 if (len < sizeof(mtuinfo))
1087 return -EINVAL;
1088
1089 len = sizeof(mtuinfo);
1090 memset(&mtuinfo, 0, sizeof(mtuinfo));
1091
1092 rcu_read_lock();
1093 dst = __sk_dst_get(sk);
1094 if (dst)
1095 mtuinfo.ip6m_mtu = dst_mtu(dst);
1096 rcu_read_unlock();
1097 if (!mtuinfo.ip6m_mtu)
1098 return -ENOTCONN;
1099
1100 if (put_user(len, optlen))
1101 return -EFAULT;
1102 if (copy_to_user(optval, &mtuinfo, len))
1103 return -EFAULT;
1104
1105 return 0;
1106 break;
1107 }
1108
1058 case IPV6_UNICAST_HOPS: 1109 case IPV6_UNICAST_HOPS:
1059 case IPV6_MULTICAST_HOPS: 1110 case IPV6_MULTICAST_HOPS:
1060 { 1111 {
@@ -1116,6 +1167,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1116 val |= IPV6_PREFER_SRC_HOME; 1167 val |= IPV6_PREFER_SRC_HOME;
1117 break; 1168 break;
1118 1169
1170 case IPV6_MINHOPCOUNT:
1171 val = np->min_hopcount;
1172 break;
1173
1174 case IPV6_DONTFRAG:
1175 val = np->dontfrag;
1176 break;
1177
1119 default: 1178 default:
1120 return -ENOPROTOOPT; 1179 return -ENOPROTOOPT;
1121 } 1180 }
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f9d05ce4e03a..59f1881968c7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
44#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
45#include <linux/seq_file.h> 45#include <linux/seq_file.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <net/mld.h>
47 48
48#include <linux/netfilter.h> 49#include <linux/netfilter.h>
49#include <linux/netfilter_ipv6.h> 50#include <linux/netfilter_ipv6.h>
@@ -71,54 +72,11 @@
71#define MDBG(x) 72#define MDBG(x)
72#endif 73#endif
73 74
74/* 75/* Ensure that we have struct in6_addr aligned on 32bit word. */
75 * These header formats should be in a separate include file, but icmpv6.h 76static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
76 * doesn't have in6_addr defined in all cases, there is no __u128, and no 77 BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
77 * other files reference these. 78 BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4),
78 * 79 BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4)
79 * +-DLS 4/14/03
80 */
81
82/* Multicast Listener Discovery version 2 headers */
83
84struct mld2_grec {
85 __u8 grec_type;
86 __u8 grec_auxwords;
87 __be16 grec_nsrcs;
88 struct in6_addr grec_mca;
89 struct in6_addr grec_src[0];
90};
91
92struct mld2_report {
93 __u8 type;
94 __u8 resv1;
95 __sum16 csum;
96 __be16 resv2;
97 __be16 ngrec;
98 struct mld2_grec grec[0];
99};
100
101struct mld2_query {
102 __u8 type;
103 __u8 code;
104 __sum16 csum;
105 __be16 mrc;
106 __be16 resv1;
107 struct in6_addr mca;
108#if defined(__LITTLE_ENDIAN_BITFIELD)
109 __u8 qrv:3,
110 suppress:1,
111 resv2:4;
112#elif defined(__BIG_ENDIAN_BITFIELD)
113 __u8 resv2:4,
114 suppress:1,
115 qrv:3;
116#else
117#error "Please fix <asm/byteorder.h>"
118#endif
119 __u8 qqic;
120 __be16 nsrcs;
121 struct in6_addr srcs[0];
122}; 80};
123 81
124static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; 82static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
@@ -157,14 +115,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
157 ((idev)->mc_v1_seen && \ 115 ((idev)->mc_v1_seen && \
158 time_before(jiffies, (idev)->mc_v1_seen))) 116 time_before(jiffies, (idev)->mc_v1_seen)))
159 117
160#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
161#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
162 ((value) < (thresh) ? (value) : \
163 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
164 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
165
166#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
167
168#define IPV6_MLD_MAX_MSF 64 118#define IPV6_MLD_MAX_MSF 64
169 119
170int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; 120int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
@@ -1161,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb)
1161 struct in6_addr *group; 1111 struct in6_addr *group;
1162 unsigned long max_delay; 1112 unsigned long max_delay;
1163 struct inet6_dev *idev; 1113 struct inet6_dev *idev;
1164 struct icmp6hdr *hdr; 1114 struct mld_msg *mld;
1165 int group_type; 1115 int group_type;
1166 int mark = 0; 1116 int mark = 0;
1167 int len; 1117 int len;
@@ -1182,8 +1132,8 @@ int igmp6_event_query(struct sk_buff *skb)
1182 if (idev == NULL) 1132 if (idev == NULL)
1183 return 0; 1133 return 0;
1184 1134
1185 hdr = icmp6_hdr(skb); 1135 mld = (struct mld_msg *)icmp6_hdr(skb);
1186 group = (struct in6_addr *) (hdr + 1); 1136 group = &mld->mld_mca;
1187 group_type = ipv6_addr_type(group); 1137 group_type = ipv6_addr_type(group);
1188 1138
1189 if (group_type != IPV6_ADDR_ANY && 1139 if (group_type != IPV6_ADDR_ANY &&
@@ -1197,7 +1147,7 @@ int igmp6_event_query(struct sk_buff *skb)
1197 /* MLDv1 router present */ 1147 /* MLDv1 router present */
1198 1148
1199 /* Translate milliseconds to jiffies */ 1149 /* Translate milliseconds to jiffies */
1200 max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000; 1150 max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
1201 1151
1202 switchback = (idev->mc_qrv + 1) * max_delay; 1152 switchback = (idev->mc_qrv + 1) * max_delay;
1203 idev->mc_v1_seen = jiffies + switchback; 1153 idev->mc_v1_seen = jiffies + switchback;
@@ -1216,14 +1166,14 @@ int igmp6_event_query(struct sk_buff *skb)
1216 return -EINVAL; 1166 return -EINVAL;
1217 } 1167 }
1218 mlh2 = (struct mld2_query *)skb_transport_header(skb); 1168 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1219 max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000; 1169 max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
1220 if (!max_delay) 1170 if (!max_delay)
1221 max_delay = 1; 1171 max_delay = 1;
1222 idev->mc_maxdelay = max_delay; 1172 idev->mc_maxdelay = max_delay;
1223 if (mlh2->qrv) 1173 if (mlh2->mld2q_qrv)
1224 idev->mc_qrv = mlh2->qrv; 1174 idev->mc_qrv = mlh2->mld2q_qrv;
1225 if (group_type == IPV6_ADDR_ANY) { /* general query */ 1175 if (group_type == IPV6_ADDR_ANY) { /* general query */
1226 if (mlh2->nsrcs) { 1176 if (mlh2->mld2q_nsrcs) {
1227 in6_dev_put(idev); 1177 in6_dev_put(idev);
1228 return -EINVAL; /* no sources allowed */ 1178 return -EINVAL; /* no sources allowed */
1229 } 1179 }
@@ -1232,9 +1182,9 @@ int igmp6_event_query(struct sk_buff *skb)
1232 return 0; 1182 return 0;
1233 } 1183 }
1234 /* mark sources to include, if group & source-specific */ 1184 /* mark sources to include, if group & source-specific */
1235 if (mlh2->nsrcs != 0) { 1185 if (mlh2->mld2q_nsrcs != 0) {
1236 if (!pskb_may_pull(skb, srcs_offset + 1186 if (!pskb_may_pull(skb, srcs_offset +
1237 ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) { 1187 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) {
1238 in6_dev_put(idev); 1188 in6_dev_put(idev);
1239 return -EINVAL; 1189 return -EINVAL;
1240 } 1190 }
@@ -1270,7 +1220,7 @@ int igmp6_event_query(struct sk_buff *skb)
1270 ma->mca_flags &= ~MAF_GSQUERY; 1220 ma->mca_flags &= ~MAF_GSQUERY;
1271 } 1221 }
1272 if (!(ma->mca_flags & MAF_GSQUERY) || 1222 if (!(ma->mca_flags & MAF_GSQUERY) ||
1273 mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) 1223 mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1274 igmp6_group_queried(ma, max_delay); 1224 igmp6_group_queried(ma, max_delay);
1275 spin_unlock_bh(&ma->mca_lock); 1225 spin_unlock_bh(&ma->mca_lock);
1276 break; 1226 break;
@@ -1286,9 +1236,8 @@ int igmp6_event_query(struct sk_buff *skb)
1286int igmp6_event_report(struct sk_buff *skb) 1236int igmp6_event_report(struct sk_buff *skb)
1287{ 1237{
1288 struct ifmcaddr6 *ma; 1238 struct ifmcaddr6 *ma;
1289 struct in6_addr *addrp;
1290 struct inet6_dev *idev; 1239 struct inet6_dev *idev;
1291 struct icmp6hdr *hdr; 1240 struct mld_msg *mld;
1292 int addr_type; 1241 int addr_type;
1293 1242
1294 /* Our own report looped back. Ignore it. */ 1243 /* Our own report looped back. Ignore it. */
@@ -1300,10 +1249,10 @@ int igmp6_event_report(struct sk_buff *skb)
1300 skb->pkt_type != PACKET_BROADCAST) 1249 skb->pkt_type != PACKET_BROADCAST)
1301 return 0; 1250 return 0;
1302 1251
1303 if (!pskb_may_pull(skb, sizeof(struct in6_addr))) 1252 if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1304 return -EINVAL; 1253 return -EINVAL;
1305 1254
1306 hdr = icmp6_hdr(skb); 1255 mld = (struct mld_msg *)icmp6_hdr(skb);
1307 1256
1308 /* Drop reports with not link local source */ 1257 /* Drop reports with not link local source */
1309 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); 1258 addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
@@ -1311,8 +1260,6 @@ int igmp6_event_report(struct sk_buff *skb)
1311 !(addr_type&IPV6_ADDR_LINKLOCAL)) 1260 !(addr_type&IPV6_ADDR_LINKLOCAL))
1312 return -EINVAL; 1261 return -EINVAL;
1313 1262
1314 addrp = (struct in6_addr *) (hdr + 1);
1315
1316 idev = in6_dev_get(skb->dev); 1263 idev = in6_dev_get(skb->dev);
1317 if (idev == NULL) 1264 if (idev == NULL)
1318 return -ENODEV; 1265 return -ENODEV;
@@ -1323,7 +1270,7 @@ int igmp6_event_report(struct sk_buff *skb)
1323 1270
1324 read_lock_bh(&idev->lock); 1271 read_lock_bh(&idev->lock);
1325 for (ma = idev->mc_list; ma; ma=ma->next) { 1272 for (ma = idev->mc_list; ma; ma=ma->next) {
1326 if (ipv6_addr_equal(&ma->mca_addr, addrp)) { 1273 if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1327 spin_lock(&ma->mca_lock); 1274 spin_lock(&ma->mca_lock);
1328 if (del_timer(&ma->mca_timer)) 1275 if (del_timer(&ma->mca_timer))
1329 atomic_dec(&ma->mca_refcnt); 1276 atomic_dec(&ma->mca_refcnt);
@@ -1432,11 +1379,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1432 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); 1379 skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1433 skb_put(skb, sizeof(*pmr)); 1380 skb_put(skb, sizeof(*pmr));
1434 pmr = (struct mld2_report *)skb_transport_header(skb); 1381 pmr = (struct mld2_report *)skb_transport_header(skb);
1435 pmr->type = ICMPV6_MLD2_REPORT; 1382 pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1436 pmr->resv1 = 0; 1383 pmr->mld2r_resv1 = 0;
1437 pmr->csum = 0; 1384 pmr->mld2r_cksum = 0;
1438 pmr->resv2 = 0; 1385 pmr->mld2r_resv2 = 0;
1439 pmr->ngrec = 0; 1386 pmr->mld2r_ngrec = 0;
1440 return skb; 1387 return skb;
1441} 1388}
1442 1389
@@ -1458,9 +1405,10 @@ static void mld_sendpack(struct sk_buff *skb)
1458 mldlen = skb->tail - skb->transport_header; 1405 mldlen = skb->tail - skb->transport_header;
1459 pip6->payload_len = htons(payload_len); 1406 pip6->payload_len = htons(payload_len);
1460 1407
1461 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, 1408 pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1462 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), 1409 IPPROTO_ICMPV6,
1463 mldlen, 0)); 1410 csum_partial(skb_transport_header(skb),
1411 mldlen, 0));
1464 1412
1465 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); 1413 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1466 1414
@@ -1521,7 +1469,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1521 pgr->grec_nsrcs = 0; 1469 pgr->grec_nsrcs = 0;
1522 pgr->grec_mca = pmc->mca_addr; /* structure copy */ 1470 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1523 pmr = (struct mld2_report *)skb_transport_header(skb); 1471 pmr = (struct mld2_report *)skb_transport_header(skb);
1524 pmr->ngrec = htons(ntohs(pmr->ngrec)+1); 1472 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1525 *ppgr = pgr; 1473 *ppgr = pgr;
1526 return skb; 1474 return skb;
1527} 1475}
@@ -1557,7 +1505,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1557 1505
1558 /* EX and TO_EX get a fresh packet, if needed */ 1506 /* EX and TO_EX get a fresh packet, if needed */
1559 if (truncate) { 1507 if (truncate) {
1560 if (pmr && pmr->ngrec && 1508 if (pmr && pmr->mld2r_ngrec &&
1561 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 1509 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1562 if (skb) 1510 if (skb)
1563 mld_sendpack(skb); 1511 mld_sendpack(skb);
@@ -1770,9 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1770 struct sock *sk = net->ipv6.igmp_sk; 1718 struct sock *sk = net->ipv6.igmp_sk;
1771 struct inet6_dev *idev; 1719 struct inet6_dev *idev;
1772 struct sk_buff *skb; 1720 struct sk_buff *skb;
1773 struct icmp6hdr *hdr; 1721 struct mld_msg *hdr;
1774 const struct in6_addr *snd_addr, *saddr; 1722 const struct in6_addr *snd_addr, *saddr;
1775 struct in6_addr *addrp;
1776 struct in6_addr addr_buf; 1723 struct in6_addr addr_buf;
1777 int err, len, payload_len, full_len; 1724 int err, len, payload_len, full_len;
1778 u8 ra[8] = { IPPROTO_ICMPV6, 0, 1725 u8 ra[8] = { IPPROTO_ICMPV6, 0,
@@ -1820,16 +1767,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1820 1767
1821 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1768 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
1822 1769
1823 hdr = (struct icmp6hdr *) skb_put(skb, sizeof(struct icmp6hdr)); 1770 hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
1824 memset(hdr, 0, sizeof(struct icmp6hdr)); 1771 memset(hdr, 0, sizeof(struct mld_msg));
1825 hdr->icmp6_type = type; 1772 hdr->mld_type = type;
1773 ipv6_addr_copy(&hdr->mld_mca, addr);
1826 1774
1827 addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr)); 1775 hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1828 ipv6_addr_copy(addrp, addr); 1776 IPPROTO_ICMPV6,
1829 1777 csum_partial(hdr, len, 0));
1830 hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len,
1831 IPPROTO_ICMPV6,
1832 csum_partial(hdr, len, 0));
1833 1778
1834 idev = in6_dev_get(skb->dev); 1779 idev = in6_dev_get(skb->dev);
1835 1780
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 554b48b6e993..4a4dcbe4f8b2 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -381,7 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
381 } 381 }
382 382
383 /* Charge it to the socket. */ 383 /* Charge it to the socket. */
384 if (sock_queue_rcv_skb(sk, skb) < 0) { 384 if (ip_queue_rcv_skb(sk, skb) < 0) {
385 kfree_skb(skb); 385 kfree_skb(skb);
386 return NET_RX_DROP; 386 return NET_RX_DROP;
387 } 387 }
@@ -461,6 +461,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
461 if (flags & MSG_ERRQUEUE) 461 if (flags & MSG_ERRQUEUE)
462 return ipv6_recv_error(sk, msg, len); 462 return ipv6_recv_error(sk, msg, len);
463 463
464 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
465 return ipv6_recv_rxpmtu(sk, msg, len);
466
464 skb = skb_recv_datagram(sk, flags, noblock, &err); 467 skb = skb_recv_datagram(sk, flags, noblock, &err);
465 if (!skb) 468 if (!skb)
466 goto out; 469 goto out;
@@ -733,6 +736,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
733 int addr_len = msg->msg_namelen; 736 int addr_len = msg->msg_namelen;
734 int hlimit = -1; 737 int hlimit = -1;
735 int tclass = -1; 738 int tclass = -1;
739 int dontfrag = -1;
736 u16 proto; 740 u16 proto;
737 int err; 741 int err;
738 742
@@ -811,7 +815,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
811 memset(opt, 0, sizeof(struct ipv6_txoptions)); 815 memset(opt, 0, sizeof(struct ipv6_txoptions));
812 opt->tot_len = sizeof(struct ipv6_txoptions); 816 opt->tot_len = sizeof(struct ipv6_txoptions);
813 817
814 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); 818 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
819 &tclass, &dontfrag);
815 if (err < 0) { 820 if (err < 0) {
816 fl6_sock_release(flowlabel); 821 fl6_sock_release(flowlabel);
817 return err; 822 return err;
@@ -880,6 +885,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
880 if (tclass < 0) 885 if (tclass < 0)
881 tclass = np->tclass; 886 tclass = np->tclass;
882 887
888 if (dontfrag < 0)
889 dontfrag = np->dontfrag;
890
883 if (msg->msg_flags&MSG_CONFIRM) 891 if (msg->msg_flags&MSG_CONFIRM)
884 goto do_confirm; 892 goto do_confirm;
885 893
@@ -890,7 +898,7 @@ back_from_confirm:
890 lock_sock(sk); 898 lock_sock(sk);
891 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, 899 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
892 len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, 900 len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
893 msg->msg_flags); 901 msg->msg_flags, dontfrag);
894 902
895 if (err) 903 if (err)
896 ip6_flush_pending_frames(sk); 904 ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2438e8cb9d0..05ebd7833043 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -815,7 +815,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
815{ 815{
816 int flags = 0; 816 int flags = 0;
817 817
818 if (rt6_need_strict(&fl->fl6_dst)) 818 if (fl->oif || rt6_need_strict(&fl->fl6_dst))
819 flags |= RT6_LOOKUP_F_IFACE; 819 flags |= RT6_LOOKUP_F_IFACE;
820 820
821 if (!ipv6_addr_any(&fl->fl6_src)) 821 if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index bd5ef7b6e48e..6603511e3673 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -353,6 +353,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
353 if (sk->sk_state == TCP_CLOSE) 353 if (sk->sk_state == TCP_CLOSE)
354 goto out; 354 goto out;
355 355
356 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
357 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
358 goto out;
359 }
360
356 tp = tcp_sk(sk); 361 tp = tcp_sk(sk);
357 seq = ntohl(th->seq); 362 seq = ntohl(th->seq);
358 if (sk->sk_state != TCP_LISTEN && 363 if (sk->sk_state != TCP_LISTEN &&
@@ -1018,7 +1023,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1018 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1023 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1019 1024
1020 t1 = (struct tcphdr *) skb_push(buff, tot_len); 1025 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1021 skb_reset_transport_header(skb); 1026 skb_reset_transport_header(buff);
1022 1027
1023 /* Swap the send and the receive. */ 1028 /* Swap the send and the receive. */
1024 memset(t1, 0, sizeof(*t1)); 1029 memset(t1, 0, sizeof(*t1));
@@ -1050,12 +1055,13 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1050 } 1055 }
1051#endif 1056#endif
1052 1057
1053 buff->csum = csum_partial(t1, tot_len, 0);
1054
1055 memset(&fl, 0, sizeof(fl)); 1058 memset(&fl, 0, sizeof(fl));
1056 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); 1059 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1057 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); 1060 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1058 1061
1062 buff->ip_summed = CHECKSUM_PARTIAL;
1063 buff->csum = 0;
1064
1059 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst); 1065 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1060 1066
1061 fl.proto = IPPROTO_TCP; 1067 fl.proto = IPPROTO_TCP;
@@ -1234,12 +1240,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1234 goto drop_and_free; 1240 goto drop_and_free;
1235 1241
1236 /* Secret recipe starts with IP addresses */ 1242 /* Secret recipe starts with IP addresses */
1237 d = &ipv6_hdr(skb)->daddr.s6_addr32[0]; 1243 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1238 *mess++ ^= *d++; 1244 *mess++ ^= *d++;
1239 *mess++ ^= *d++; 1245 *mess++ ^= *d++;
1240 *mess++ ^= *d++; 1246 *mess++ ^= *d++;
1241 *mess++ ^= *d++; 1247 *mess++ ^= *d++;
1242 d = &ipv6_hdr(skb)->saddr.s6_addr32[0]; 1248 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1243 *mess++ ^= *d++; 1249 *mess++ ^= *d++;
1244 *mess++ ^= *d++; 1250 *mess++ ^= *d++;
1245 *mess++ ^= *d++; 1251 *mess++ ^= *d++;
@@ -1677,6 +1683,7 @@ ipv6_pktoptions:
1677static int tcp_v6_rcv(struct sk_buff *skb) 1683static int tcp_v6_rcv(struct sk_buff *skb)
1678{ 1684{
1679 struct tcphdr *th; 1685 struct tcphdr *th;
1686 struct ipv6hdr *hdr;
1680 struct sock *sk; 1687 struct sock *sk;
1681 int ret; 1688 int ret;
1682 struct net *net = dev_net(skb->dev); 1689 struct net *net = dev_net(skb->dev);
@@ -1703,12 +1710,13 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1703 goto bad_packet; 1710 goto bad_packet;
1704 1711
1705 th = tcp_hdr(skb); 1712 th = tcp_hdr(skb);
1713 hdr = ipv6_hdr(skb);
1706 TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1714 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1707 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1715 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1708 skb->len - th->doff*4); 1716 skb->len - th->doff*4);
1709 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1717 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1710 TCP_SKB_CB(skb)->when = 0; 1718 TCP_SKB_CB(skb)->when = 0;
1711 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1719 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1712 TCP_SKB_CB(skb)->sacked = 0; 1720 TCP_SKB_CB(skb)->sacked = 0;
1713 1721
1714 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); 1722 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1719,6 +1727,11 @@ process:
1719 if (sk->sk_state == TCP_TIME_WAIT) 1727 if (sk->sk_state == TCP_TIME_WAIT)
1720 goto do_time_wait; 1728 goto do_time_wait;
1721 1729
1730 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1731 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1732 goto discard_and_relse;
1733 }
1734
1722 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1735 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1723 goto discard_and_relse; 1736 goto discard_and_relse;
1724 1737
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 90824852f598..3d7a2c0b836a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net,
91 if (ipv6_addr_any(addr6)) 91 if (ipv6_addr_any(addr6))
92 hash = jhash_1word(0, mix); 92 hash = jhash_1word(0, mix);
93 else if (ipv6_addr_v4mapped(addr6)) 93 else if (ipv6_addr_v4mapped(addr6))
94 hash = jhash_1word(addr6->s6_addr32[3], mix); 94 hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
95 else 95 else
96 hash = jhash2(addr6->s6_addr32, 4, mix); 96 hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
97 97
98 return hash ^ port; 98 return hash ^ port;
99} 99}
@@ -335,6 +335,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
335 if (flags & MSG_ERRQUEUE) 335 if (flags & MSG_ERRQUEUE)
336 return ipv6_recv_error(sk, msg, len); 336 return ipv6_recv_error(sk, msg, len);
337 337
338 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
339 return ipv6_recv_rxpmtu(sk, msg, len);
340
338try_again: 341try_again:
339 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 342 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
340 &peeked, &err); 343 &peeked, &err);
@@ -421,7 +424,7 @@ out:
421 return err; 424 return err;
422 425
423csum_copy_err: 426csum_copy_err:
424 lock_sock(sk); 427 lock_sock_bh(sk);
425 if (!skb_kill_datagram(sk, skb, flags)) { 428 if (!skb_kill_datagram(sk, skb, flags)) {
426 if (is_udp4) 429 if (is_udp4)
427 UDP_INC_STATS_USER(sock_net(sk), 430 UDP_INC_STATS_USER(sock_net(sk),
@@ -430,7 +433,7 @@ csum_copy_err:
430 UDP6_INC_STATS_USER(sock_net(sk), 433 UDP6_INC_STATS_USER(sock_net(sk),
431 UDP_MIB_INERRORS, is_udplite); 434 UDP_MIB_INERRORS, is_udplite);
432 } 435 }
433 release_sock(sk); 436 unlock_sock_bh(sk);
434 437
435 if (flags & MSG_DONTWAIT) 438 if (flags & MSG_DONTWAIT)
436 return -EAGAIN; 439 return -EAGAIN;
@@ -511,7 +514,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
511 goto drop; 514 goto drop;
512 } 515 }
513 516
514 if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { 517 if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
515 /* Note that an ENOMEM error is charged twice */ 518 /* Note that an ENOMEM error is charged twice */
516 if (rc == -ENOMEM) 519 if (rc == -ENOMEM)
517 UDP6_INC_STATS_BH(sock_net(sk), 520 UDP6_INC_STATS_BH(sock_net(sk),
@@ -581,6 +584,10 @@ static void flush_stack(struct sock **stack, unsigned int count,
581 584
582 sk = stack[i]; 585 sk = stack[i];
583 if (skb1) { 586 if (skb1) {
587 if (sk_rcvqueues_full(sk, skb)) {
588 kfree_skb(skb1);
589 goto drop;
590 }
584 bh_lock_sock(sk); 591 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk)) 592 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1); 593 udpv6_queue_rcv_skb(sk, skb1);
@@ -692,7 +699,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
692 u32 ulen = 0; 699 u32 ulen = 0;
693 700
694 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 701 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
695 goto short_packet; 702 goto discard;
696 703
697 saddr = &ipv6_hdr(skb)->saddr; 704 saddr = &ipv6_hdr(skb)->saddr;
698 daddr = &ipv6_hdr(skb)->daddr; 705 daddr = &ipv6_hdr(skb)->daddr;
@@ -756,6 +763,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
756 763
757 /* deliver */ 764 /* deliver */
758 765
766 if (sk_rcvqueues_full(sk, skb)) {
767 sock_put(sk);
768 goto discard;
769 }
759 bh_lock_sock(sk); 770 bh_lock_sock(sk);
760 if (!sock_owned_by_user(sk)) 771 if (!sock_owned_by_user(sk))
761 udpv6_queue_rcv_skb(sk, skb); 772 udpv6_queue_rcv_skb(sk, skb);
@@ -770,9 +781,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
770 return 0; 781 return 0;
771 782
772short_packet: 783short_packet:
773 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n", 784 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
774 proto == IPPROTO_UDPLITE ? "-Lite" : "", 785 proto == IPPROTO_UDPLITE ? "-Lite" : "",
775 ulen, skb->len); 786 saddr,
787 ntohs(uh->source),
788 ulen,
789 skb->len,
790 daddr,
791 ntohs(uh->dest));
776 792
777discard: 793discard:
778 UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 794 UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
@@ -919,6 +935,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
919 int ulen = len; 935 int ulen = len;
920 int hlimit = -1; 936 int hlimit = -1;
921 int tclass = -1; 937 int tclass = -1;
938 int dontfrag = -1;
922 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 939 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
923 int err; 940 int err;
924 int connected = 0; 941 int connected = 0;
@@ -1049,7 +1066,8 @@ do_udp_sendmsg:
1049 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1066 memset(opt, 0, sizeof(struct ipv6_txoptions));
1050 opt->tot_len = sizeof(*opt); 1067 opt->tot_len = sizeof(*opt);
1051 1068
1052 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); 1069 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
1070 &tclass, &dontfrag);
1053 if (err < 0) { 1071 if (err < 0) {
1054 fl6_sock_release(flowlabel); 1072 fl6_sock_release(flowlabel);
1055 return err; 1073 return err;
@@ -1120,6 +1138,9 @@ do_udp_sendmsg:
1120 if (tclass < 0) 1138 if (tclass < 0)
1121 tclass = np->tclass; 1139 tclass = np->tclass;
1122 1140
1141 if (dontfrag < 0)
1142 dontfrag = np->dontfrag;
1143
1123 if (msg->msg_flags&MSG_CONFIRM) 1144 if (msg->msg_flags&MSG_CONFIRM)
1124 goto do_confirm; 1145 goto do_confirm;
1125back_from_confirm: 1146back_from_confirm:
@@ -1143,7 +1164,7 @@ do_append_data:
1143 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, 1164 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
1144 sizeof(struct udphdr), hlimit, tclass, opt, &fl, 1165 sizeof(struct udphdr), hlimit, tclass, opt, &fl,
1145 (struct rt6_info*)dst, 1166 (struct rt6_info*)dst,
1146 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1167 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
1147 if (err) 1168 if (err)
1148 udp_v6_flush_pending_frames(sk); 1169 udp_v6_flush_pending_frames(sk);
1149 else if (!corkreq) 1170 else if (!corkreq)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8c452fd5ceae..4a0e77e14468 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -94,7 +94,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
94 xdst->u.dst.dev = dev; 94 xdst->u.dst.dev = dev;
95 dev_hold(dev); 95 dev_hold(dev);
96 96
97 xdst->u.rt6.rt6i_idev = in6_dev_get(rt->u.dst.dev); 97 xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
98 if (!xdst->u.rt6.rt6i_idev) 98 if (!xdst->u.rt6.rt6i_idev)
99 return -ENODEV; 99 return -ENODEV;
100 100
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 2a4efcea3423..79986a674f6e 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
347 self->tx_flow = flow; 347 self->tx_flow = flow;
348 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", 348 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
349 __func__); 349 __func__);
350 wake_up_interruptible(sk->sk_sleep); 350 wake_up_interruptible(sk_sleep(sk));
351 break; 351 break;
352 default: 352 default:
353 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); 353 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
900 if (flags & O_NONBLOCK) 900 if (flags & O_NONBLOCK)
901 goto out; 901 goto out;
902 902
903 err = wait_event_interruptible(*(sk->sk_sleep), 903 err = wait_event_interruptible(*(sk_sleep(sk)),
904 skb_peek(&sk->sk_receive_queue)); 904 skb_peek(&sk->sk_receive_queue));
905 if (err) 905 if (err)
906 goto out; 906 goto out;
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1066 goto out; 1066 goto out;
1067 1067
1068 err = -ERESTARTSYS; 1068 err = -ERESTARTSYS;
1069 if (wait_event_interruptible(*(sk->sk_sleep), 1069 if (wait_event_interruptible(*(sk_sleep(sk)),
1070 (sk->sk_state != TCP_SYN_SENT))) 1070 (sk->sk_state != TCP_SYN_SENT)))
1071 goto out; 1071 goto out;
1072 1072
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1318 1318
1319 /* Check if IrTTP is wants us to slow down */ 1319 /* Check if IrTTP is wants us to slow down */
1320 1320
1321 if (wait_event_interruptible(*(sk->sk_sleep), 1321 if (wait_event_interruptible(*(sk_sleep(sk)),
1322 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { 1322 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
1323 err = -ERESTARTSYS; 1323 err = -ERESTARTSYS;
1324 goto out; 1324 goto out;
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1477 if (copied >= target) 1477 if (copied >= target)
1478 break; 1478 break;
1479 1479
1480 prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1480 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1481 1481
1482 /* 1482 /*
1483 * POSIX 1003.1g mandates this order. 1483 * POSIX 1003.1g mandates this order.
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1497 /* Wait process until data arrives */ 1497 /* Wait process until data arrives */
1498 schedule(); 1498 schedule();
1499 1499
1500 finish_wait(sk->sk_sleep, &wait); 1500 finish_wait(sk_sleep(sk), &wait);
1501 1501
1502 if (err) 1502 if (err)
1503 goto out; 1503 goto out;
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1787 IRDA_DEBUG(4, "%s()\n", __func__); 1787 IRDA_DEBUG(4, "%s()\n", __func__);
1788 1788
1789 lock_kernel(); 1789 lock_kernel();
1790 poll_wait(file, sk->sk_sleep, wait); 1790 poll_wait(file, sk_sleep(sk), wait);
1791 mask = 0; 1791 mask = 0;
1792 1792
1793 /* Exceptional events? */ 1793 /* Exceptional events? */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c18286a2167b..8be324fe08b9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -59,7 +59,7 @@ do { \
59 DEFINE_WAIT(__wait); \ 59 DEFINE_WAIT(__wait); \
60 long __timeo = timeo; \ 60 long __timeo = timeo; \
61 ret = 0; \ 61 ret = 0; \
62 prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \ 62 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
63 while (!(condition)) { \ 63 while (!(condition)) { \
64 if (!__timeo) { \ 64 if (!__timeo) { \
65 ret = -EAGAIN; \ 65 ret = -EAGAIN; \
@@ -76,7 +76,7 @@ do { \
76 if (ret) \ 76 if (ret) \
77 break; \ 77 break; \
78 } \ 78 } \
79 finish_wait(sk->sk_sleep, &__wait); \ 79 finish_wait(sk_sleep(sk), &__wait); \
80} while (0) 80} while (0)
81 81
82#define iucv_sock_wait(sk, condition, timeo) \ 82#define iucv_sock_wait(sk, condition, timeo) \
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk)
305 */ 305 */
306static void iucv_sock_wake_msglim(struct sock *sk) 306static void iucv_sock_wake_msglim(struct sock *sk)
307{ 307{
308 read_lock(&sk->sk_callback_lock); 308 struct socket_wq *wq;
309 if (sk_has_sleeper(sk)) 309
310 wake_up_interruptible_all(sk->sk_sleep); 310 rcu_read_lock();
311 wq = rcu_dereference(sk->sk_wq);
312 if (wq_has_sleeper(wq))
313 wake_up_interruptible_all(&wq->wait);
311 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 314 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
312 read_unlock(&sk->sk_callback_lock); 315 rcu_read_unlock();
313} 316}
314 317
315/* Timers */ 318/* Timers */
@@ -795,7 +798,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
795 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 798 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
796 799
797 /* Wait for an incoming connection */ 800 /* Wait for an incoming connection */
798 add_wait_queue_exclusive(sk->sk_sleep, &wait); 801 add_wait_queue_exclusive(sk_sleep(sk), &wait);
799 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { 802 while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
800 set_current_state(TASK_INTERRUPTIBLE); 803 set_current_state(TASK_INTERRUPTIBLE);
801 if (!timeo) { 804 if (!timeo) {
@@ -819,7 +822,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
819 } 822 }
820 823
821 set_current_state(TASK_RUNNING); 824 set_current_state(TASK_RUNNING);
822 remove_wait_queue(sk->sk_sleep, &wait); 825 remove_wait_queue(sk_sleep(sk), &wait);
823 826
824 if (err) 827 if (err)
825 goto done; 828 goto done;
@@ -1269,7 +1272,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1269 struct sock *sk = sock->sk; 1272 struct sock *sk = sock->sk;
1270 unsigned int mask = 0; 1273 unsigned int mask = 0;
1271 1274
1272 sock_poll_wait(file, sk->sk_sleep, wait); 1275 sock_poll_wait(file, sk_sleep(sk), wait);
1273 1276
1274 if (sk->sk_state == IUCV_LISTEN) 1277 if (sk->sk_state == IUCV_LISTEN)
1275 return iucv_accept_poll(sk); 1278 return iucv_accept_poll(sk);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ecc7aea9efe4..1712af1c7b3f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1617,14 +1617,9 @@ EXPORT_SYMBOL_GPL(l2tp_session_create);
1617 1617
1618static __net_init int l2tp_init_net(struct net *net) 1618static __net_init int l2tp_init_net(struct net *net)
1619{ 1619{
1620 struct l2tp_net *pn; 1620 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1621 int err;
1622 int hash; 1621 int hash;
1623 1622
1624 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1625 if (!pn)
1626 return -ENOMEM;
1627
1628 INIT_LIST_HEAD(&pn->l2tp_tunnel_list); 1623 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1629 spin_lock_init(&pn->l2tp_tunnel_list_lock); 1624 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1630 1625
@@ -1633,33 +1628,11 @@ static __net_init int l2tp_init_net(struct net *net)
1633 1628
1634 spin_lock_init(&pn->l2tp_session_hlist_lock); 1629 spin_lock_init(&pn->l2tp_session_hlist_lock);
1635 1630
1636 err = net_assign_generic(net, l2tp_net_id, pn);
1637 if (err)
1638 goto out;
1639
1640 return 0; 1631 return 0;
1641
1642out:
1643 kfree(pn);
1644 return err;
1645}
1646
1647static __net_exit void l2tp_exit_net(struct net *net)
1648{
1649 struct l2tp_net *pn;
1650
1651 pn = net_generic(net, l2tp_net_id);
1652 /*
1653 * if someone has cached our net then
1654 * further net_generic call will return NULL
1655 */
1656 net_assign_generic(net, l2tp_net_id, NULL);
1657 kfree(pn);
1658} 1632}
1659 1633
1660static struct pernet_operations l2tp_net_ops = { 1634static struct pernet_operations l2tp_net_ops = {
1661 .init = l2tp_init_net, 1635 .init = l2tp_init_net,
1662 .exit = l2tp_exit_net,
1663 .id = &l2tp_net_id, 1636 .id = &l2tp_net_id,
1664 .size = sizeof(struct l2tp_net), 1637 .size = sizeof(struct l2tp_net),
1665}; 1638};
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index ca1164afeb74..58c6c4cda73b 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -276,43 +276,16 @@ out:
276 276
277static __net_init int l2tp_eth_init_net(struct net *net) 277static __net_init int l2tp_eth_init_net(struct net *net)
278{ 278{
279 struct l2tp_eth_net *pn; 279 struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
280 int err;
281
282 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
283 if (!pn)
284 return -ENOMEM;
285 280
286 INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); 281 INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
287 spin_lock_init(&pn->l2tp_eth_lock); 282 spin_lock_init(&pn->l2tp_eth_lock);
288 283
289 err = net_assign_generic(net, l2tp_eth_net_id, pn);
290 if (err)
291 goto out;
292
293 return 0; 284 return 0;
294
295out:
296 kfree(pn);
297 return err;
298}
299
300static __net_exit void l2tp_eth_exit_net(struct net *net)
301{
302 struct l2tp_eth_net *pn;
303
304 pn = net_generic(net, l2tp_eth_net_id);
305 /*
306 * if someone has cached our net then
307 * further net_generic call will return NULL
308 */
309 net_assign_generic(net, l2tp_eth_net_id, NULL);
310 kfree(pn);
311} 285}
312 286
313static __net_initdata struct pernet_operations l2tp_eth_net_ops = { 287static __net_initdata struct pernet_operations l2tp_eth_net_ops = {
314 .init = l2tp_eth_init_net, 288 .init = l2tp_eth_init_net,
315 .exit = l2tp_eth_exit_net,
316 .id = &l2tp_eth_net_id, 289 .id = &l2tp_eth_net_id,
317 .size = sizeof(struct l2tp_eth_net), 290 .size = sizeof(struct l2tp_eth_net),
318}; 291};
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2db6a9f75913..023ba820236f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
536 int rc = 0; 536 int rc = 0;
537 537
538 while (1) { 538 while (1) {
539 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 539 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
540 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) 540 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
541 break; 541 break;
542 rc = -ERESTARTSYS; 542 rc = -ERESTARTSYS;
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
547 break; 547 break;
548 rc = 0; 548 rc = 0;
549 } 549 }
550 finish_wait(sk->sk_sleep, &wait); 550 finish_wait(sk_sleep(sk), &wait);
551 return rc; 551 return rc;
552} 552}
553 553
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
556 DEFINE_WAIT(wait); 556 DEFINE_WAIT(wait);
557 557
558 while (1) { 558 while (1) {
559 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 559 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
560 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) 560 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
561 break; 561 break;
562 if (signal_pending(current) || !timeout) 562 if (signal_pending(current) || !timeout)
563 break; 563 break;
564 } 564 }
565 finish_wait(sk->sk_sleep, &wait); 565 finish_wait(sk_sleep(sk), &wait);
566 return timeout; 566 return timeout;
567} 567}
568 568
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
573 int rc; 573 int rc;
574 574
575 while (1) { 575 while (1) {
576 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 576 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
577 rc = 0; 577 rc = 0;
578 if (sk_wait_event(sk, &timeout, 578 if (sk_wait_event(sk, &timeout,
579 (sk->sk_shutdown & RCV_SHUTDOWN) || 579 (sk->sk_shutdown & RCV_SHUTDOWN) ||
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
588 if (!timeout) 588 if (!timeout)
589 break; 589 break;
590 } 590 }
591 finish_wait(sk->sk_sleep, &wait); 591 finish_wait(sk_sleep(sk), &wait);
592 return rc; 592 return rc;
593} 593}
594 594
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9598fdb4ad01..6bb9a9a94960 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -19,8 +19,9 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "driver-ops.h" 20#include "driver-ops.h"
21 21
22void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 22static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
23 u16 initiator, u16 reason) 23 u16 initiator, u16 reason,
24 bool from_timer)
24{ 25{
25 struct ieee80211_local *local = sta->local; 26 struct ieee80211_local *local = sta->local;
26 struct tid_ampdu_rx *tid_rx; 27 struct tid_ampdu_rx *tid_rx;
@@ -70,10 +71,17 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
70 71
71 spin_unlock_bh(&sta->lock); 72 spin_unlock_bh(&sta->lock);
72 73
73 del_timer_sync(&tid_rx->session_timer); 74 if (!from_timer)
75 del_timer_sync(&tid_rx->session_timer);
74 kfree(tid_rx); 76 kfree(tid_rx);
75} 77}
76 78
79void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
80 u16 initiator, u16 reason)
81{
82 ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
83}
84
77/* 85/*
78 * After accepting the AddBA Request we activated a timer, 86 * After accepting the AddBA Request we activated a timer,
79 * resetting it after each frame that arrives from the originator. 87 * resetting it after each frame that arrives from the originator.
@@ -92,8 +100,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
92#ifdef CONFIG_MAC80211_HT_DEBUG 100#ifdef CONFIG_MAC80211_HT_DEBUG
93 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 101 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
94#endif 102#endif
95 __ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, 103 ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
96 WLAN_REASON_QSTA_TIMEOUT); 104 WLAN_REASON_QSTA_TIMEOUT, true);
97} 105}
98 106
99static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 107static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 608063f11797..c163d0a149f4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -184,10 +184,9 @@ static void sta_addba_resp_timer_expired(unsigned long data)
184 HT_AGG_STATE_REQ_STOP_BA_MSK)) != 184 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
185 HT_ADDBA_REQUESTED_MSK) { 185 HT_ADDBA_REQUESTED_MSK) {
186 spin_unlock_bh(&sta->lock); 186 spin_unlock_bh(&sta->lock);
187 *state = HT_AGG_STATE_IDLE;
188#ifdef CONFIG_MAC80211_HT_DEBUG 187#ifdef CONFIG_MAC80211_HT_DEBUG
189 printk(KERN_DEBUG "timer expired on tid %d but we are not " 188 printk(KERN_DEBUG "timer expired on tid %d but we are not "
190 "(or no longer) expecting addBA response there", 189 "(or no longer) expecting addBA response there\n",
191 tid); 190 tid);
192#endif 191#endif
193 return; 192 return;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7dd7cda75cfa..ae37270a0633 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
97 params->mesh_id_len, 97 params->mesh_id_len,
98 params->mesh_id); 98 params->mesh_id);
99 99
100 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
101 return 0;
102
103 if (type == NL80211_IFTYPE_AP_VLAN && 100 if (type == NL80211_IFTYPE_AP_VLAN &&
104 params && params->use_4addr == 0) 101 params && params->use_4addr == 0)
105 rcu_assign_pointer(sdata->u.vlan.sta, NULL); 102 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
@@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
107 params && params->use_4addr >= 0) 104 params && params->use_4addr >= 0)
108 sdata->u.mgd.use_4addr = params->use_4addr; 105 sdata->u.mgd.use_4addr = params->use_4addr;
109 106
110 sdata->u.mntr_flags = *flags; 107 if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
108 sdata->u.mntr_flags = *flags;
109
111 return 0; 110 return 0;
112} 111}
113 112
@@ -411,6 +410,17 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
411 return ret; 410 return ret;
412} 411}
413 412
413static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
414 int idx, struct survey_info *survey)
415{
416 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
417
418 if (!local->ops->get_survey)
419 return -EOPNOTSUPP;
420
421 return drv_get_survey(local, idx, survey);
422}
423
414static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 424static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
415 u8 *mac, struct station_info *sinfo) 425 u8 *mac, struct station_info *sinfo)
416{ 426{
@@ -1104,6 +1114,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1104 changed |= BSS_CHANGED_BASIC_RATES; 1114 changed |= BSS_CHANGED_BASIC_RATES;
1105 } 1115 }
1106 1116
1117 if (params->ap_isolate >= 0) {
1118 if (params->ap_isolate)
1119 sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
1120 else
1121 sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
1122 }
1123
1107 ieee80211_bss_info_change_notify(sdata, changed); 1124 ieee80211_bss_info_change_notify(sdata, changed);
1108 1125
1109 return 0; 1126 return 0;
@@ -1388,11 +1405,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1388 return -EOPNOTSUPP; 1405 return -EOPNOTSUPP;
1389 1406
1390 if (enabled == sdata->u.mgd.powersave && 1407 if (enabled == sdata->u.mgd.powersave &&
1391 timeout == conf->dynamic_ps_timeout) 1408 timeout == conf->dynamic_ps_forced_timeout)
1392 return 0; 1409 return 0;
1393 1410
1394 sdata->u.mgd.powersave = enabled; 1411 sdata->u.mgd.powersave = enabled;
1395 conf->dynamic_ps_timeout = timeout; 1412 conf->dynamic_ps_forced_timeout = timeout;
1396 1413
1397 /* no change, but if automatic follow powersave */ 1414 /* no change, but if automatic follow powersave */
1398 mutex_lock(&sdata->u.mgd.mtx); 1415 mutex_lock(&sdata->u.mgd.mtx);
@@ -1508,6 +1525,7 @@ struct cfg80211_ops mac80211_config_ops = {
1508 .change_station = ieee80211_change_station, 1525 .change_station = ieee80211_change_station,
1509 .get_station = ieee80211_get_station, 1526 .get_station = ieee80211_get_station,
1510 .dump_station = ieee80211_dump_station, 1527 .dump_station = ieee80211_dump_station,
1528 .dump_survey = ieee80211_dump_survey,
1511#ifdef CONFIG_MAC80211_MESH 1529#ifdef CONFIG_MAC80211_MESH
1512 .add_mpath = ieee80211_add_mpath, 1530 .add_mpath = ieee80211_add_mpath,
1513 .del_mpath = ieee80211_del_mpath, 1531 .del_mpath = ieee80211_del_mpath,
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6bc9b07c3eda..e763f1529ddb 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -39,6 +39,13 @@ static const struct file_operations sta_ ##name## _ops = { \
39 .open = mac80211_open_file_generic, \ 39 .open = mac80211_open_file_generic, \
40} 40}
41 41
42#define STA_OPS_RW(name) \
43static const struct file_operations sta_ ##name## _ops = { \
44 .read = sta_##name##_read, \
45 .write = sta_##name##_write, \
46 .open = mac80211_open_file_generic, \
47}
48
42#define STA_FILE(name, field, format) \ 49#define STA_FILE(name, field, format) \
43 STA_READ_##format(name, field) \ 50 STA_READ_##format(name, field) \
44 STA_OPS(name) 51 STA_OPS(name)
@@ -156,7 +163,63 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
156 163
157 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); 164 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
158} 165}
159STA_OPS(agg_status); 166
167static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
168 size_t count, loff_t *ppos)
169{
170 char _buf[12], *buf = _buf;
171 struct sta_info *sta = file->private_data;
172 bool start, tx;
173 unsigned long tid;
174 int ret;
175
176 if (count > sizeof(_buf))
177 return -EINVAL;
178
179 if (copy_from_user(buf, userbuf, count))
180 return -EFAULT;
181
182 buf[sizeof(_buf) - 1] = '\0';
183
184 if (strncmp(buf, "tx ", 3) == 0) {
185 buf += 3;
186 tx = true;
187 } else if (strncmp(buf, "rx ", 3) == 0) {
188 buf += 3;
189 tx = false;
190 } else
191 return -EINVAL;
192
193 if (strncmp(buf, "start ", 6) == 0) {
194 buf += 6;
195 start = true;
196 if (!tx)
197 return -EINVAL;
198 } else if (strncmp(buf, "stop ", 5) == 0) {
199 buf += 5;
200 start = false;
201 } else
202 return -EINVAL;
203
204 tid = simple_strtoul(buf, NULL, 0);
205
206 if (tid >= STA_TID_NUM)
207 return -EINVAL;
208
209 if (tx) {
210 if (start)
211 ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
212 else
213 ret = ieee80211_stop_tx_ba_session(&sta->sta, tid,
214 WLAN_BACK_RECIPIENT);
215 } else {
216 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
217 ret = 0;
218 }
219
220 return ret ?: count;
221}
222STA_OPS_RW(agg_status);
160 223
161static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, 224static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
162 size_t count, loff_t *ppos) 225 size_t count, loff_t *ppos)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9179196da264..ee8b63f92f71 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -152,14 +152,15 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
152} 152}
153 153
154static inline int drv_hw_scan(struct ieee80211_local *local, 154static inline int drv_hw_scan(struct ieee80211_local *local,
155 struct ieee80211_sub_if_data *sdata,
155 struct cfg80211_scan_request *req) 156 struct cfg80211_scan_request *req)
156{ 157{
157 int ret; 158 int ret;
158 159
159 might_sleep(); 160 might_sleep();
160 161
161 ret = local->ops->hw_scan(&local->hw, req); 162 ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
162 trace_drv_hw_scan(local, req, ret); 163 trace_drv_hw_scan(local, sdata, req, ret);
163 return ret; 164 return ret;
164} 165}
165 166
@@ -344,6 +345,15 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
344 return ret; 345 return ret;
345} 346}
346 347
348static inline int drv_get_survey(struct ieee80211_local *local, int idx,
349 struct survey_info *survey)
350{
351 int ret = -EOPNOTSUPP;
352 if (local->ops->conf_tx)
353 ret = local->ops->get_survey(&local->hw, idx, survey);
354 /* trace_drv_get_survey(local, idx, survey, ret); */
355 return ret;
356}
347 357
348static inline void drv_rfkill_poll(struct ieee80211_local *local) 358static inline void drv_rfkill_poll(struct ieee80211_local *local)
349{ 359{
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index e209cb82ff29..ce734b58d07a 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -363,23 +363,26 @@ TRACE_EVENT(drv_update_tkip_key,
363 363
364TRACE_EVENT(drv_hw_scan, 364TRACE_EVENT(drv_hw_scan,
365 TP_PROTO(struct ieee80211_local *local, 365 TP_PROTO(struct ieee80211_local *local,
366 struct ieee80211_sub_if_data *sdata,
366 struct cfg80211_scan_request *req, int ret), 367 struct cfg80211_scan_request *req, int ret),
367 368
368 TP_ARGS(local, req, ret), 369 TP_ARGS(local, sdata, req, ret),
369 370
370 TP_STRUCT__entry( 371 TP_STRUCT__entry(
371 LOCAL_ENTRY 372 LOCAL_ENTRY
373 VIF_ENTRY
372 __field(int, ret) 374 __field(int, ret)
373 ), 375 ),
374 376
375 TP_fast_assign( 377 TP_fast_assign(
376 LOCAL_ASSIGN; 378 LOCAL_ASSIGN;
379 VIF_ASSIGN;
377 __entry->ret = ret; 380 __entry->ret = ret;
378 ), 381 ),
379 382
380 TP_printk( 383 TP_printk(
381 LOCAL_PR_FMT " ret:%d", 384 LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
382 LOCAL_PR_ARG, __entry->ret 385 LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
383 ) 386 )
384); 387);
385 388
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e6f3b0c7a71f..b72ee6435fa3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -92,6 +92,12 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
92 if (memcmp(ifibss->bssid, bssid, ETH_ALEN)) 92 if (memcmp(ifibss->bssid, bssid, ETH_ALEN))
93 sta_info_flush(sdata->local, sdata); 93 sta_info_flush(sdata->local, sdata);
94 94
95 /* if merging, indicate to driver that we leave the old IBSS */
96 if (sdata->vif.bss_conf.ibss_joined) {
97 sdata->vif.bss_conf.ibss_joined = false;
98 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
99 }
100
95 memcpy(ifibss->bssid, bssid, ETH_ALEN); 101 memcpy(ifibss->bssid, bssid, ETH_ALEN);
96 102
97 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 103 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
@@ -171,6 +177,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
171 bss_change |= BSS_CHANGED_BSSID; 177 bss_change |= BSS_CHANGED_BSSID;
172 bss_change |= BSS_CHANGED_BEACON; 178 bss_change |= BSS_CHANGED_BEACON;
173 bss_change |= BSS_CHANGED_BEACON_ENABLED; 179 bss_change |= BSS_CHANGED_BEACON_ENABLED;
180 bss_change |= BSS_CHANGED_IBSS;
181 sdata->vif.bss_conf.ibss_joined = true;
174 ieee80211_bss_info_change_notify(sdata, bss_change); 182 ieee80211_bss_info_change_notify(sdata, bss_change);
175 183
176 ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); 184 ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
@@ -481,7 +489,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
481 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 489 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
482 "IBSS networks with same SSID (merge)\n", sdata->name); 490 "IBSS networks with same SSID (merge)\n", sdata->name);
483 491
484 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); 492 ieee80211_request_internal_scan(sdata,
493 ifibss->ssid, ifibss->ssid_len,
494 ifibss->fixed_channel ? ifibss->channel : NULL);
485} 495}
486 496
487static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 497static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -588,8 +598,9 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
588 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 598 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
589 "join\n", sdata->name); 599 "join\n", sdata->name);
590 600
591 ieee80211_request_internal_scan(sdata, ifibss->ssid, 601 ieee80211_request_internal_scan(sdata,
592 ifibss->ssid_len); 602 ifibss->ssid, ifibss->ssid_len,
603 ifibss->fixed_channel ? ifibss->channel : NULL);
593 } else { 604 } else {
594 int interval = IEEE80211_SCAN_INTERVAL; 605 int interval = IEEE80211_SCAN_INTERVAL;
595 606
@@ -897,6 +908,12 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
897 sdata->u.ibss.channel = params->channel; 908 sdata->u.ibss.channel = params->channel;
898 sdata->u.ibss.fixed_channel = params->channel_fixed; 909 sdata->u.ibss.fixed_channel = params->channel_fixed;
899 910
911 /* fix ourselves to that channel now already */
912 if (params->channel_fixed) {
913 sdata->local->oper_channel = params->channel;
914 sdata->local->oper_channel_type = NL80211_CHAN_NO_HT;
915 }
916
900 if (params->ie) { 917 if (params->ie) {
901 sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, 918 sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
902 GFP_KERNEL); 919 GFP_KERNEL);
@@ -951,7 +968,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
951 kfree(sdata->u.ibss.ie); 968 kfree(sdata->u.ibss.ie);
952 skb = sdata->u.ibss.presp; 969 skb = sdata->u.ibss.presp;
953 rcu_assign_pointer(sdata->u.ibss.presp, NULL); 970 rcu_assign_pointer(sdata->u.ibss.presp, NULL);
954 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 971 sdata->vif.bss_conf.ibss_joined = false;
972 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
973 BSS_CHANGED_IBSS);
955 synchronize_rcu(); 974 synchronize_rcu();
956 kfree_skb(skb); 975 kfree_skb(skb);
957 976
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c9712f35e596..cbaf4981e110 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1019,7 +1019,8 @@ void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
1019/* scan/BSS handling */ 1019/* scan/BSS handling */
1020void ieee80211_scan_work(struct work_struct *work); 1020void ieee80211_scan_work(struct work_struct *work);
1021int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 1021int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
1022 const u8 *ssid, u8 ssid_len); 1022 const u8 *ssid, u8 ssid_len,
1023 struct ieee80211_channel *chan);
1023int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 1024int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
1024 struct cfg80211_scan_request *req); 1025 struct cfg80211_scan_request *req);
1025void ieee80211_scan_cancel(struct ieee80211_local *local); 1026void ieee80211_scan_cancel(struct ieee80211_local *local);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b252d8..8d4b41787dcf 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,6 +140,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
140 struct ieee80211_sub_if_data, 140 struct ieee80211_sub_if_data,
141 u.ap); 141 u.ap);
142 142
143 key->conf.ap_addr = sdata->dev->dev_addr;
143 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); 144 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
144 145
145 if (!ret) { 146 if (!ret) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 011ee85bcd57..bd632e1ee2c5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -442,7 +442,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
442 struct ieee80211_local *local = hw_to_local(hw); 442 struct ieee80211_local *local = hw_to_local(hw);
443 int result; 443 int result;
444 enum ieee80211_band band; 444 enum ieee80211_band band;
445 int channels, i, j, max_bitrates; 445 int channels, max_bitrates;
446 bool supp_ht; 446 bool supp_ht;
447 static const u32 cipher_suites[] = { 447 static const u32 cipher_suites[] = {
448 WLAN_CIPHER_SUITE_WEP40, 448 WLAN_CIPHER_SUITE_WEP40,
@@ -572,6 +572,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
572 572
573 local->hw.conf.listen_interval = local->hw.max_listen_interval; 573 local->hw.conf.listen_interval = local->hw.max_listen_interval;
574 574
575 local->hw.conf.dynamic_ps_forced_timeout = -1;
576
575 result = sta_info_start(local); 577 result = sta_info_start(local);
576 if (result < 0) 578 if (result < 0)
577 goto fail_sta_info; 579 goto fail_sta_info;
@@ -606,21 +608,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
606 608
607 ieee80211_led_init(local); 609 ieee80211_led_init(local);
608 610
609 /* alloc internal scan request */
610 i = 0;
611 local->int_scan_req->ssids = &local->scan_ssid;
612 local->int_scan_req->n_ssids = 1;
613 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
614 if (!hw->wiphy->bands[band])
615 continue;
616 for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
617 local->int_scan_req->channels[i] =
618 &hw->wiphy->bands[band]->channels[j];
619 i++;
620 }
621 }
622 local->int_scan_req->n_channels = i;
623
624 local->network_latency_notifier.notifier_call = 611 local->network_latency_notifier.notifier_call =
625 ieee80211_max_network_latency; 612 ieee80211_max_network_latency;
626 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 613 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 35d850223a75..358226f63b81 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -175,6 +175,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
175 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || 175 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
176 channel_type != local->hw.conf.channel_type; 176 channel_type != local->hw.conf.channel_type;
177 177
178 if (local->tmp_channel)
179 local->tmp_channel_type = channel_type;
178 local->oper_channel_type = channel_type; 180 local->oper_channel_type = channel_type;
179 181
180 if (ht_changed) { 182 if (ht_changed) {
@@ -476,6 +478,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
476{ 478{
477 struct ieee80211_sub_if_data *sdata, *found = NULL; 479 struct ieee80211_sub_if_data *sdata, *found = NULL;
478 int count = 0; 480 int count = 0;
481 int timeout;
479 482
480 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) { 483 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
481 local->ps_sdata = NULL; 484 local->ps_sdata = NULL;
@@ -509,6 +512,26 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
509 beaconint_us = ieee80211_tu_to_usec( 512 beaconint_us = ieee80211_tu_to_usec(
510 found->vif.bss_conf.beacon_int); 513 found->vif.bss_conf.beacon_int);
511 514
515 timeout = local->hw.conf.dynamic_ps_forced_timeout;
516 if (timeout < 0) {
517 /*
518 * The 2 second value is there for compatibility until
519 * the PM_QOS_NETWORK_LATENCY is configured with real
520 * values.
521 */
522 if (latency == 2000000000)
523 timeout = 100;
524 else if (latency <= 50000)
525 timeout = 300;
526 else if (latency <= 100000)
527 timeout = 100;
528 else if (latency <= 500000)
529 timeout = 50;
530 else
531 timeout = 0;
532 }
533 local->hw.conf.dynamic_ps_timeout = timeout;
534
512 if (beaconint_us > latency) { 535 if (beaconint_us > latency) {
513 local->ps_sdata = NULL; 536 local->ps_sdata = NULL;
514 } else { 537 } else {
@@ -1331,12 +1354,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1331 mutex_lock(&sdata->local->iflist_mtx); 1354 mutex_lock(&sdata->local->iflist_mtx);
1332 ieee80211_recalc_ps(sdata->local, -1); 1355 ieee80211_recalc_ps(sdata->local, -1);
1333 mutex_unlock(&sdata->local->iflist_mtx); 1356 mutex_unlock(&sdata->local->iflist_mtx);
1357
1358 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1359 return;
1360
1334 /* 1361 /*
1335 * We've received a probe response, but are not sure whether 1362 * We've received a probe response, but are not sure whether
1336 * we have or will be receiving any beacons or data, so let's 1363 * we have or will be receiving any beacons or data, so let's
1337 * schedule the timers again, just in case. 1364 * schedule the timers again, just in case.
1338 */ 1365 */
1339 mod_beacon_timer(sdata); 1366 mod_beacon_timer(sdata);
1367
1340 mod_timer(&ifmgd->conn_mon_timer, 1368 mod_timer(&ifmgd->conn_mon_timer,
1341 round_jiffies_up(jiffies + 1369 round_jiffies_up(jiffies +
1342 IEEE80211_CONNECTION_IDLE_TIME)); 1370 IEEE80211_CONNECTION_IDLE_TIME));
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 72efbd87c1eb..9a08f2c446c6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -81,8 +81,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
81 len += 8; 81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1; 83 len += 1;
84 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
85 len += 1;
86 84
87 if (len & 1) /* padding for RX_FLAGS if necessary */ 85 if (len & 1) /* padding for RX_FLAGS if necessary */
88 len++; 86 len++;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e1a3defdf581..e14c44195ae9 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -85,7 +85,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
85{ 85{
86 struct cfg80211_bss *cbss; 86 struct cfg80211_bss *cbss;
87 struct ieee80211_bss *bss; 87 struct ieee80211_bss *bss;
88 int clen; 88 int clen, srlen;
89 s32 signal = 0; 89 s32 signal = 0;
90 90
91 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 91 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -114,23 +114,24 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
114 bss->dtim_period = tim_ie->dtim_period; 114 bss->dtim_period = tim_ie->dtim_period;
115 } 115 }
116 116
117 bss->supp_rates_len = 0; 117 /* replace old supported rates if we get new values */
118 srlen = 0;
118 if (elems->supp_rates) { 119 if (elems->supp_rates) {
119 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 120 clen = IEEE80211_MAX_SUPP_RATES;
120 if (clen > elems->supp_rates_len) 121 if (clen > elems->supp_rates_len)
121 clen = elems->supp_rates_len; 122 clen = elems->supp_rates_len;
122 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, 123 memcpy(bss->supp_rates, elems->supp_rates, clen);
123 clen); 124 srlen += clen;
124 bss->supp_rates_len += clen;
125 } 125 }
126 if (elems->ext_supp_rates) { 126 if (elems->ext_supp_rates) {
127 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 127 clen = IEEE80211_MAX_SUPP_RATES - srlen;
128 if (clen > elems->ext_supp_rates_len) 128 if (clen > elems->ext_supp_rates_len)
129 clen = elems->ext_supp_rates_len; 129 clen = elems->ext_supp_rates_len;
130 memcpy(&bss->supp_rates[bss->supp_rates_len], 130 memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
131 elems->ext_supp_rates, clen); 131 srlen += clen;
132 bss->supp_rates_len += clen;
133 } 132 }
133 if (srlen)
134 bss->supp_rates_len = srlen;
134 135
135 bss->wmm_used = elems->wmm_param || elems->wmm_info; 136 bss->wmm_used = elems->wmm_param || elems->wmm_info;
136 bss->uapsd_supported = is_uapsd_supported(elems); 137 bss->uapsd_supported = is_uapsd_supported(elems);
@@ -411,7 +412,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
411 412
412 if (local->ops->hw_scan) { 413 if (local->ops->hw_scan) {
413 WARN_ON(!ieee80211_prep_hw_scan(local)); 414 WARN_ON(!ieee80211_prep_hw_scan(local));
414 rc = drv_hw_scan(local, local->hw_scan_req); 415 rc = drv_hw_scan(local, sdata, local->hw_scan_req);
415 } else 416 } else
416 rc = ieee80211_start_sw_scan(local); 417 rc = ieee80211_start_sw_scan(local);
417 418
@@ -655,7 +656,7 @@ void ieee80211_scan_work(struct work_struct *work)
655 } 656 }
656 657
657 if (local->hw_scan_req) { 658 if (local->hw_scan_req) {
658 int rc = drv_hw_scan(local, local->hw_scan_req); 659 int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
659 mutex_unlock(&local->scan_mtx); 660 mutex_unlock(&local->scan_mtx);
660 if (rc) 661 if (rc)
661 ieee80211_scan_completed(&local->hw, true); 662 ieee80211_scan_completed(&local->hw, true);
@@ -728,10 +729,12 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
728} 729}
729 730
730int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 731int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
731 const u8 *ssid, u8 ssid_len) 732 const u8 *ssid, u8 ssid_len,
733 struct ieee80211_channel *chan)
732{ 734{
733 struct ieee80211_local *local = sdata->local; 735 struct ieee80211_local *local = sdata->local;
734 int ret = -EBUSY; 736 int ret = -EBUSY;
737 enum nl80211_band band;
735 738
736 mutex_lock(&local->scan_mtx); 739 mutex_lock(&local->scan_mtx);
737 740
@@ -739,6 +742,30 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
739 if (local->scan_req) 742 if (local->scan_req)
740 goto unlock; 743 goto unlock;
741 744
745 /* fill internal scan request */
746 if (!chan) {
747 int i, nchan = 0;
748
749 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
750 if (!local->hw.wiphy->bands[band])
751 continue;
752 for (i = 0;
753 i < local->hw.wiphy->bands[band]->n_channels;
754 i++) {
755 local->int_scan_req->channels[nchan] =
756 &local->hw.wiphy->bands[band]->channels[i];
757 nchan++;
758 }
759 }
760
761 local->int_scan_req->n_channels = nchan;
762 } else {
763 local->int_scan_req->channels[0] = chan;
764 local->int_scan_req->n_channels = 1;
765 }
766
767 local->int_scan_req->ssids = &local->scan_ssid;
768 local->int_scan_req->n_ssids = 1;
742 memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); 769 memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
743 local->int_scan_req->ssids[0].ssid_len = ssid_len; 770 local->int_scan_req->ssids[0].ssid_len = ssid_len;
744 771
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ff0eb948917b..730197591ab5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -575,7 +575,7 @@ static int sta_info_buffer_expired(struct sta_info *sta,
575} 575}
576 576
577 577
578static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 578static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
579 struct sta_info *sta) 579 struct sta_info *sta)
580{ 580{
581 unsigned long flags; 581 unsigned long flags;
@@ -583,7 +583,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
583 struct ieee80211_sub_if_data *sdata; 583 struct ieee80211_sub_if_data *sdata;
584 584
585 if (skb_queue_empty(&sta->ps_tx_buf)) 585 if (skb_queue_empty(&sta->ps_tx_buf))
586 return; 586 return false;
587 587
588 for (;;) { 588 for (;;) {
589 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 589 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
@@ -608,6 +608,8 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
608 if (skb_queue_empty(&sta->ps_tx_buf)) 608 if (skb_queue_empty(&sta->ps_tx_buf))
609 sta_info_clear_tim_bit(sta); 609 sta_info_clear_tim_bit(sta);
610 } 610 }
611
612 return true;
611} 613}
612 614
613static int __must_check __sta_info_destroy(struct sta_info *sta) 615static int __must_check __sta_info_destroy(struct sta_info *sta)
@@ -755,15 +757,20 @@ static void sta_info_cleanup(unsigned long data)
755{ 757{
756 struct ieee80211_local *local = (struct ieee80211_local *) data; 758 struct ieee80211_local *local = (struct ieee80211_local *) data;
757 struct sta_info *sta; 759 struct sta_info *sta;
760 bool timer_needed = false;
758 761
759 rcu_read_lock(); 762 rcu_read_lock();
760 list_for_each_entry_rcu(sta, &local->sta_list, list) 763 list_for_each_entry_rcu(sta, &local->sta_list, list)
761 sta_info_cleanup_expire_buffered(local, sta); 764 if (sta_info_cleanup_expire_buffered(local, sta))
765 timer_needed = true;
762 rcu_read_unlock(); 766 rcu_read_unlock();
763 767
764 if (local->quiescing) 768 if (local->quiescing)
765 return; 769 return;
766 770
771 if (!timer_needed)
772 return;
773
767 local->sta_cleanup.expires = 774 local->sta_cleanup.expires =
768 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 775 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
769 add_timer(&local->sta_cleanup); 776 add_timer(&local->sta_cleanup);
@@ -848,8 +855,12 @@ struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
848 struct sta_info *sta, *nxt; 855 struct sta_info *sta, *nxt;
849 856
850 /* Just return a random station ... first in list ... */ 857 /* Just return a random station ... first in list ... */
851 for_each_sta_info(hw_to_local(hw), addr, sta, nxt) 858 for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
859 if (!sta->uploaded)
860 return NULL;
852 return &sta->sta; 861 return &sta->sta;
862 }
863
853 return NULL; 864 return NULL;
854} 865}
855EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); 866EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
@@ -857,14 +868,19 @@ EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
857struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, 868struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
858 const u8 *addr) 869 const u8 *addr)
859{ 870{
860 struct ieee80211_sub_if_data *sdata; 871 struct sta_info *sta;
861 872
862 if (!vif) 873 if (!vif)
863 return NULL; 874 return NULL;
864 875
865 sdata = vif_to_sdata(vif); 876 sta = sta_info_get_bss(vif_to_sdata(vif), addr);
877 if (!sta)
878 return NULL;
879
880 if (!sta->uploaded)
881 return NULL;
866 882
867 return ieee80211_find_sta_by_hw(&sdata->local->hw, addr); 883 return &sta->sta;
868} 884}
869EXPORT_SYMBOL(ieee80211_find_sta); 885EXPORT_SYMBOL(ieee80211_find_sta);
870 886
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 11805a3a626f..94613af009f3 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -171,6 +171,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
171 struct net_device *prev_dev = NULL; 171 struct net_device *prev_dev = NULL;
172 struct sta_info *sta, *tmp; 172 struct sta_info *sta, *tmp;
173 int retry_count = -1, i; 173 int retry_count = -1, i;
174 int rates_idx = -1;
174 bool send_to_cooked; 175 bool send_to_cooked;
175 176
176 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 177 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -178,6 +179,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
178 if (i >= hw->max_rates) { 179 if (i >= hw->max_rates) {
179 info->status.rates[i].idx = -1; 180 info->status.rates[i].idx = -1;
180 info->status.rates[i].count = 0; 181 info->status.rates[i].count = 0;
182 } else if (info->status.rates[i].idx >= 0) {
183 rates_idx = i;
181 } 184 }
182 185
183 retry_count += info->status.rates[i].count; 186 retry_count += info->status.rates[i].count;
@@ -206,6 +209,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
206 return; 209 return;
207 } 210 }
208 211
212 if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
213 (rates_idx != -1))
214 sta->last_tx_rate = info->status.rates[rates_idx];
215
209 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && 216 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
210 (ieee80211_is_data_qos(fc))) { 217 (ieee80211_is_data_qos(fc))) {
211 u16 tid, ssn; 218 u16 tid, ssn;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2cb77267f733..f3841f43249e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -429,6 +429,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
429 struct sta_info *sta = tx->sta; 429 struct sta_info *sta = tx->sta;
430 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 430 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
431 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 431 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
432 struct ieee80211_local *local = tx->local;
432 u32 staflags; 433 u32 staflags;
433 434
434 if (unlikely(!sta || 435 if (unlikely(!sta ||
@@ -476,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
476 info->control.vif = &tx->sdata->vif; 477 info->control.vif = &tx->sdata->vif;
477 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 478 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
478 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 479 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
480
481 if (!timer_pending(&local->sta_cleanup))
482 mod_timer(&local->sta_cleanup,
483 round_jiffies(jiffies +
484 STA_INFO_CLEANUP_INTERVAL));
485
479 return TX_QUEUED; 486 return TX_QUEUED;
480 } 487 }
481#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 488#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -586,7 +593,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
586 struct ieee80211_hdr *hdr = (void *)tx->skb->data; 593 struct ieee80211_hdr *hdr = (void *)tx->skb->data;
587 struct ieee80211_supported_band *sband; 594 struct ieee80211_supported_band *sband;
588 struct ieee80211_rate *rate; 595 struct ieee80211_rate *rate;
589 int i, len; 596 int i;
597 u32 len;
590 bool inval = false, rts = false, short_preamble = false; 598 bool inval = false, rts = false, short_preamble = false;
591 struct ieee80211_tx_rate_control txrc; 599 struct ieee80211_tx_rate_control txrc;
592 u32 sta_flags; 600 u32 sta_flags;
@@ -595,7 +603,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
595 603
596 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 604 sband = tx->local->hw.wiphy->bands[tx->channel->band];
597 605
598 len = min_t(int, tx->skb->len + FCS_LEN, 606 len = min_t(u32, tx->skb->len + FCS_LEN,
599 tx->local->hw.wiphy->frag_threshold); 607 tx->local->hw.wiphy->frag_threshold);
600 608
601 /* set up the tx rate control struct we give the RC algo */ 609 /* set up the tx rate control struct we give the RC algo */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index bdb1d05b16fc..3dd07600199d 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -213,15 +213,25 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
213 213
214 sband = local->hw.wiphy->bands[wk->chan->band]; 214 sband = local->hw.wiphy->bands[wk->chan->band];
215 215
216 /* 216 if (wk->assoc.supp_rates_len) {
217 * Get all rates supported by the device and the AP as 217 /*
218 * some APs don't like getting a superset of their rates 218 * Get all rates supported by the device and the AP as
219 * in the association request (e.g. D-Link DAP 1353 in 219 * some APs don't like getting a superset of their rates
220 * b-only mode)... 220 * in the association request (e.g. D-Link DAP 1353 in
221 */ 221 * b-only mode)...
222 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates, 222 */
223 wk->assoc.supp_rates_len, 223 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
224 sband, &rates); 224 wk->assoc.supp_rates_len,
225 sband, &rates);
226 } else {
227 /*
228 * In case AP not provide any supported rates information
229 * before association, we send information element(s) with
230 * all rates that we support.
231 */
232 rates = ~0;
233 rates_len = sband->n_bitrates;
234 }
225 235
226 skb = alloc_skb(local->hw.extra_tx_headroom + 236 skb = alloc_skb(local->hw.extra_tx_headroom +
227 sizeof(*mgmt) + /* bit too much but doesn't matter */ 237 sizeof(*mgmt) + /* bit too much but doesn't matter */
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8fb0ae616761..7ba06939829f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data)
802 ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); 802 ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
803 803
804 while (!kthread_should_stop()) { 804 while (!kthread_should_stop()) {
805 wait_event_interruptible(*tinfo->sock->sk->sk_sleep, 805 wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
806 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) 806 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
807 || kthread_should_stop()); 807 || kthread_should_stop());
808 808
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa07f044b599..06cb02796a0e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
739 DEFINE_WAIT(wait); 739 DEFINE_WAIT(wait);
740 740
741 for (;;) { 741 for (;;) {
742 prepare_to_wait(sk->sk_sleep, &wait, 742 prepare_to_wait(sk_sleep(sk), &wait,
743 TASK_INTERRUPTIBLE); 743 TASK_INTERRUPTIBLE);
744 if (sk->sk_state != TCP_SYN_SENT) 744 if (sk->sk_state != TCP_SYN_SENT)
745 break; 745 break;
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
752 err = -ERESTARTSYS; 752 err = -ERESTARTSYS;
753 break; 753 break;
754 } 754 }
755 finish_wait(sk->sk_sleep, &wait); 755 finish_wait(sk_sleep(sk), &wait);
756 if (err) 756 if (err)
757 goto out_release; 757 goto out_release;
758 } 758 }
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
798 * hooked into the SABM we saved 798 * hooked into the SABM we saved
799 */ 799 */
800 for (;;) { 800 for (;;) {
801 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 801 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
802 skb = skb_dequeue(&sk->sk_receive_queue); 802 skb = skb_dequeue(&sk->sk_receive_queue);
803 if (skb) 803 if (skb)
804 break; 804 break;
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
816 err = -ERESTARTSYS; 816 err = -ERESTARTSYS;
817 break; 817 break;
818 } 818 }
819 finish_wait(sk->sk_sleep, &wait); 819 finish_wait(sk_sleep(sk), &wait);
820 if (err) 820 if (err)
821 goto out_release; 821 goto out_release;
822 822
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f162d59d8161..2078a277e06b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2228,8 +2228,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2228 case SIOCGIFDSTADDR: 2228 case SIOCGIFDSTADDR:
2229 case SIOCSIFDSTADDR: 2229 case SIOCSIFDSTADDR:
2230 case SIOCSIFFLAGS: 2230 case SIOCSIFFLAGS:
2231 if (!net_eq(sock_net(sk), &init_net))
2232 return -ENOIOCTLCMD;
2233 return inet_dgram_ops.ioctl(sock, cmd, arg); 2231 return inet_dgram_ops.ioctl(sock, cmd, arg);
2234#endif 2232#endif
2235 2233
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a95762abd3..af4d38bc3b22 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock)
664 if (signal_pending(tsk)) 664 if (signal_pending(tsk))
665 return sock_intr_errno(timeo); 665 return sock_intr_errno(timeo);
666 666
667 prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait, 667 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
668 TASK_INTERRUPTIBLE); 668 TASK_INTERRUPTIBLE);
669 release_sock(sk); 669 release_sock(sk);
670 timeo = schedule_timeout(timeo); 670 timeo = schedule_timeout(timeo);
671 lock_sock(sk); 671 lock_sock(sk);
672 finish_wait(&sk->sk_socket->wait, &wait); 672 finish_wait(sk_sleep(sk), &wait);
673 } 673 }
674 674
675 return 0; 675 return 0;
@@ -910,10 +910,10 @@ disabled:
910 goto out; 910 goto out;
911 } 911 }
912 912
913 prepare_to_wait(&sk->sk_socket->wait, &wait, 913 prepare_to_wait(sk_sleep(sk), &wait,
914 TASK_INTERRUPTIBLE); 914 TASK_INTERRUPTIBLE);
915 done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); 915 done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
916 finish_wait(&sk->sk_socket->wait, &wait); 916 finish_wait(sk_sleep(sk), &wait);
917 917
918 if (sk->sk_state != TCP_ESTABLISHED) 918 if (sk->sk_state != TCP_ESTABLISHED)
919 goto disabled; 919 goto disabled;
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b4ced6e0968..c33da6576942 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -46,9 +46,16 @@ struct phonet_net {
46 46
47int phonet_net_id __read_mostly; 47int phonet_net_id __read_mostly;
48 48
49static struct phonet_net *phonet_pernet(struct net *net)
50{
51 BUG_ON(!net);
52
53 return net_generic(net, phonet_net_id);
54}
55
49struct phonet_device_list *phonet_device_list(struct net *net) 56struct phonet_device_list *phonet_device_list(struct net *net)
50{ 57{
51 struct phonet_net *pnn = net_generic(net, phonet_net_id); 58 struct phonet_net *pnn = phonet_pernet(net);
52 return &pnn->pndevs; 59 return &pnn->pndevs;
53} 60}
54 61
@@ -261,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
261 268
262static void phonet_route_autodel(struct net_device *dev) 269static void phonet_route_autodel(struct net_device *dev)
263{ 270{
264 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); 271 struct phonet_net *pnn = phonet_pernet(dev_net(dev));
265 unsigned i; 272 unsigned i;
266 DECLARE_BITMAP(deleted, 64); 273 DECLARE_BITMAP(deleted, 64);
267 274
@@ -313,7 +320,7 @@ static struct notifier_block phonet_device_notifier = {
313/* Per-namespace Phonet devices handling */ 320/* Per-namespace Phonet devices handling */
314static int __net_init phonet_init_net(struct net *net) 321static int __net_init phonet_init_net(struct net *net)
315{ 322{
316 struct phonet_net *pnn = net_generic(net, phonet_net_id); 323 struct phonet_net *pnn = phonet_pernet(net);
317 324
318 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) 325 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
319 return -ENOMEM; 326 return -ENOMEM;
@@ -326,7 +333,7 @@ static int __net_init phonet_init_net(struct net *net)
326 333
327static void __net_exit phonet_exit_net(struct net *net) 334static void __net_exit phonet_exit_net(struct net *net)
328{ 335{
329 struct phonet_net *pnn = net_generic(net, phonet_net_id); 336 struct phonet_net *pnn = phonet_pernet(net);
330 struct net_device *dev; 337 struct net_device *dev;
331 unsigned i; 338 unsigned i;
332 339
@@ -376,7 +383,7 @@ void phonet_device_exit(void)
376 383
377int phonet_route_add(struct net_device *dev, u8 daddr) 384int phonet_route_add(struct net_device *dev, u8 daddr)
378{ 385{
379 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); 386 struct phonet_net *pnn = phonet_pernet(dev_net(dev));
380 struct phonet_routes *routes = &pnn->routes; 387 struct phonet_routes *routes = &pnn->routes;
381 int err = -EEXIST; 388 int err = -EEXIST;
382 389
@@ -393,7 +400,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
393 400
394int phonet_route_del(struct net_device *dev, u8 daddr) 401int phonet_route_del(struct net_device *dev, u8 daddr)
395{ 402{
396 struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); 403 struct phonet_net *pnn = phonet_pernet(dev_net(dev));
397 struct phonet_routes *routes = &pnn->routes; 404 struct phonet_routes *routes = &pnn->routes;
398 405
399 daddr = daddr >> 2; 406 daddr = daddr >> 2;
@@ -413,7 +420,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
413 420
414struct net_device *phonet_route_get(struct net *net, u8 daddr) 421struct net_device *phonet_route_get(struct net *net, u8 daddr)
415{ 422{
416 struct phonet_net *pnn = net_generic(net, phonet_net_id); 423 struct phonet_net *pnn = phonet_pernet(net);
417 struct phonet_routes *routes = &pnn->routes; 424 struct phonet_routes *routes = &pnn->routes;
418 struct net_device *dev; 425 struct net_device *dev;
419 426
@@ -428,7 +435,7 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
428 435
429struct net_device *phonet_route_output(struct net *net, u8 daddr) 436struct net_device *phonet_route_output(struct net *net, u8 daddr)
430{ 437{
431 struct phonet_net *pnn = net_generic(net, phonet_net_id); 438 struct phonet_net *pnn = phonet_pernet(net);
432 struct phonet_routes *routes = &pnn->routes; 439 struct phonet_routes *routes = &pnn->routes;
433 struct net_device *dev; 440 struct net_device *dev;
434 441
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd0744f..6e9848bf0370 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
265 struct pep_sock *pn = pep_sk(sk); 265 struct pep_sock *pn = pep_sk(sk);
266 unsigned int mask = 0; 266 unsigned int mask = 0;
267 267
268 poll_wait(file, &sock->wait, wait); 268 poll_wait(file, sk_sleep(sk), wait);
269 269
270 switch (sk->sk_state) { 270 switch (sk->sk_state) {
271 case TCP_LISTEN: 271 case TCP_LISTEN:
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 7919a9edb8e9..aebfecbdb841 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -158,7 +158,7 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
158 unsigned int mask = 0; 158 unsigned int mask = 0;
159 unsigned long flags; 159 unsigned long flags;
160 160
161 poll_wait(file, sk->sk_sleep, wait); 161 poll_wait(file, sk_sleep(sk), wait);
162 162
163 if (rs->rs_seen_congestion) 163 if (rs->rs_seen_congestion)
164 poll_wait(file, &rds_poll_waitq, wait); 164 poll_wait(file, &rds_poll_waitq, wait);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 5ea82fc47c3e..e599ba2f950d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -133,7 +133,7 @@ static int __init rds_rdma_listen_init(void)
133 ret = PTR_ERR(cm_id); 133 ret = PTR_ERR(cm_id);
134 printk(KERN_ERR "RDS/RDMA: failed to setup listener, " 134 printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
135 "rdma_create_id() returned %d\n", ret); 135 "rdma_create_id() returned %d\n", ret);
136 goto out; 136 return ret;
137 } 137 }
138 138
139 sin.sin_family = AF_INET, 139 sin.sin_family = AF_INET,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 4bec6e2ed495..c224b5bb3ba9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -492,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
492void rds_wake_sk_sleep(struct rds_sock *rs); 492void rds_wake_sk_sleep(struct rds_sock *rs);
493static inline void __rds_wake_sk_sleep(struct sock *sk) 493static inline void __rds_wake_sk_sleep(struct sock *sk)
494{ 494{
495 wait_queue_head_t *waitq = sk->sk_sleep; 495 wait_queue_head_t *waitq = sk_sleep(sk);
496 496
497 if (!sock_flag(sk, SOCK_DEAD) && waitq) 497 if (!sock_flag(sk, SOCK_DEAD) && waitq)
498 wake_up(waitq); 498 wake_up(waitq);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index e2a2b9344f7b..795a00b7f2cb 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
432 break; 432 break;
433 } 433 }
434 434
435 timeo = wait_event_interruptible_timeout(*sk->sk_sleep, 435 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
436 (!list_empty(&rs->rs_notify_queue) || 436 (!list_empty(&rs->rs_notify_queue) ||
437 rs->rs_cong_notify || 437 rs->rs_cong_notify ||
438 rds_next_incoming(rs, &inc)), timeo); 438 rds_next_incoming(rs, &inc)), timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index 53d6795ac9d0..9c1c6bcaa6c9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -915,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
915 goto out; 915 goto out;
916 } 916 }
917 917
918 timeo = wait_event_interruptible_timeout(*sk->sk_sleep, 918 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
919 rds_send_queue_rm(rs, conn, rm, 919 rds_send_queue_rm(rs, conn, rm,
920 rs->rs_bound_port, 920 rs->rs_bound_port,
921 dport, 921 dport,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4fb711a035f4..8e45e76a95f5 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -845,7 +845,7 @@ rose_try_next_neigh:
845 DEFINE_WAIT(wait); 845 DEFINE_WAIT(wait);
846 846
847 for (;;) { 847 for (;;) {
848 prepare_to_wait(sk->sk_sleep, &wait, 848 prepare_to_wait(sk_sleep(sk), &wait,
849 TASK_INTERRUPTIBLE); 849 TASK_INTERRUPTIBLE);
850 if (sk->sk_state != TCP_SYN_SENT) 850 if (sk->sk_state != TCP_SYN_SENT)
851 break; 851 break;
@@ -858,7 +858,7 @@ rose_try_next_neigh:
858 err = -ERESTARTSYS; 858 err = -ERESTARTSYS;
859 break; 859 break;
860 } 860 }
861 finish_wait(sk->sk_sleep, &wait); 861 finish_wait(sk_sleep(sk), &wait);
862 862
863 if (err) 863 if (err)
864 goto out_release; 864 goto out_release;
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
911 * hooked into the SABM we saved 911 * hooked into the SABM we saved
912 */ 912 */
913 for (;;) { 913 for (;;) {
914 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 914 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
915 915
916 skb = skb_dequeue(&sk->sk_receive_queue); 916 skb = skb_dequeue(&sk->sk_receive_queue);
917 if (skb) 917 if (skb)
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
930 err = -ERESTARTSYS; 930 err = -ERESTARTSYS;
931 break; 931 break;
932 } 932 }
933 finish_wait(sk->sk_sleep, &wait); 933 finish_wait(sk_sleep(sk), &wait);
934 if (err) 934 if (err)
935 goto out_release; 935 goto out_release;
936 936
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c060095b27ce..0b9bb2085ce4 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk)
62static void rxrpc_write_space(struct sock *sk) 62static void rxrpc_write_space(struct sock *sk)
63{ 63{
64 _enter("%p", sk); 64 _enter("%p", sk);
65 read_lock(&sk->sk_callback_lock); 65 rcu_read_lock();
66 if (rxrpc_writable(sk)) { 66 if (rxrpc_writable(sk)) {
67 if (sk_has_sleeper(sk)) 67 struct socket_wq *wq = rcu_dereference(sk->sk_wq);
68 wake_up_interruptible(sk->sk_sleep); 68
69 if (wq_has_sleeper(wq))
70 wake_up_interruptible(&wq->wait);
69 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 71 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
70 } 72 }
71 read_unlock(&sk->sk_callback_lock); 73 rcu_read_unlock();
72} 74}
73 75
74/* 76/*
@@ -589,7 +591,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
589 unsigned int mask; 591 unsigned int mask;
590 struct sock *sk = sock->sk; 592 struct sock *sk = sock->sk;
591 593
592 sock_poll_wait(file, sk->sk_sleep, wait); 594 sock_poll_wait(file, sk_sleep(sk), wait);
593 mask = 0; 595 mask = 0;
594 596
595 /* the socket is readable if there are any messages waiting on the Rx 597 /* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 60c2b94e6b54..0c65013e3bfe 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -91,7 +91,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
91 91
92 /* wait for a message to turn up */ 92 /* wait for a message to turn up */
93 release_sock(&rx->sk); 93 release_sock(&rx->sk);
94 prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, 94 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
95 TASK_INTERRUPTIBLE); 95 TASK_INTERRUPTIBLE);
96 ret = sock_error(&rx->sk); 96 ret = sock_error(&rx->sk);
97 if (ret) 97 if (ret)
@@ -102,7 +102,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
102 goto wait_interrupted; 102 goto wait_interrupted;
103 timeo = schedule_timeout(timeo); 103 timeo = schedule_timeout(timeo);
104 } 104 }
105 finish_wait(rx->sk.sk_sleep, &wait); 105 finish_wait(sk_sleep(&rx->sk), &wait);
106 lock_sock(&rx->sk); 106 lock_sock(&rx->sk);
107 continue; 107 continue;
108 } 108 }
@@ -356,7 +356,7 @@ csum_copy_error:
356wait_interrupted: 356wait_interrupted:
357 ret = sock_intr_errno(timeo); 357 ret = sock_intr_errno(timeo);
358wait_error: 358wait_error:
359 finish_wait(rx->sk.sk_sleep, &wait); 359 finish_wait(sk_sleep(&rx->sk), &wait);
360 if (continue_call) 360 if (continue_call)
361 rxrpc_put_call(continue_call); 361 rxrpc_put_call(continue_call);
362 if (copied) 362 if (copied)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aeddabfb8e4e..a969b111bd76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
94 * Another cpu is holding lock, requeue & delay xmits for 94 * Another cpu is holding lock, requeue & delay xmits for
95 * some time. 95 * some time.
96 */ 96 */
97 __get_cpu_var(netdev_rx_stat).cpu_collision++; 97 __get_cpu_var(softnet_data).cpu_collision++;
98 ret = dev_requeue_skb(skb, q); 98 ret = dev_requeue_skb(skb, q);
99 } 99 }
100 100
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c5a9ac566007..c65762823f5e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
123 case htons(ETH_P_IP): 123 case htons(ETH_P_IP):
124 { 124 {
125 const struct iphdr *iph = ip_hdr(skb); 125 const struct iphdr *iph = ip_hdr(skb);
126 h = iph->daddr; 126 h = (__force u32)iph->daddr;
127 h2 = iph->saddr ^ iph->protocol; 127 h2 = (__force u32)iph->saddr ^ iph->protocol;
128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
129 (iph->protocol == IPPROTO_TCP || 129 (iph->protocol == IPPROTO_TCP ||
130 iph->protocol == IPPROTO_UDP || 130 iph->protocol == IPPROTO_UDP ||
@@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
138 case htons(ETH_P_IPV6): 138 case htons(ETH_P_IPV6):
139 { 139 {
140 struct ipv6hdr *iph = ipv6_hdr(skb); 140 struct ipv6hdr *iph = ipv6_hdr(skb);
141 h = iph->daddr.s6_addr32[3]; 141 h = (__force u32)iph->daddr.s6_addr32[3];
142 h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr; 142 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
143 if (iph->nexthdr == IPPROTO_TCP || 143 if (iph->nexthdr == IPPROTO_TCP ||
144 iph->nexthdr == IPPROTO_UDP || 144 iph->nexthdr == IPPROTO_UDP ||
145 iph->nexthdr == IPPROTO_UDPLITE || 145 iph->nexthdr == IPPROTO_UDPLITE ||
@@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
150 break; 150 break;
151 } 151 }
152 default: 152 default:
153 h = (unsigned long)skb_dst(skb) ^ skb->protocol; 153 h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
154 h2 = (unsigned long)skb->sk; 154 h2 = (unsigned long)skb->sk;
155 } 155 }
156 156
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 58b3e882a187..126b014eb79b 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,6 +37,18 @@ menuconfig IP_SCTP
37 37
38if IP_SCTP 38if IP_SCTP
39 39
40config NET_SCTPPROBE
41 tristate "SCTP: Association probing"
42 depends on PROC_FS && KPROBES
43 ---help---
44 This module allows for capturing the changes to SCTP association
45 state in response to incoming packets. It is used for debugging
46 SCTP congestion control algorithms. If you don't understand
47 what was just said, you don't need it: say N.
48
49 To compile this code as a module, choose M here: the
50 module will be called sctp_probe.
51
40config SCTP_DBG_MSG 52config SCTP_DBG_MSG
41 bool "SCTP: Debug messages" 53 bool "SCTP: Debug messages"
42 help 54 help
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 6b794734380a..5c30b7a873df 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_IP_SCTP) += sctp.o 5obj-$(CONFIG_IP_SCTP) += sctp.o
6obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
6 7
7sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ 8sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
8 protocol.o endpointola.o associola.o \ 9 protocol.o endpointola.o associola.o \
@@ -11,6 +12,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
11 tsnmap.o bind_addr.o socket.o primitive.o \ 12 tsnmap.o bind_addr.o socket.o primitive.o \
12 output.o input.o debug.o ssnmap.o auth.o 13 output.o input.o debug.o ssnmap.o auth.o
13 14
15sctp_probe-y := probe.o
16
14sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o 17sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
15sctp-$(CONFIG_PROC_FS) += proc.o 18sctp-$(CONFIG_PROC_FS) += proc.o
16sctp-$(CONFIG_SYSCTL) += sysctl.o 19sctp-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index df5abbff63e2..3912420cedcc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -87,9 +87,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
87 /* Retrieve the SCTP per socket area. */ 87 /* Retrieve the SCTP per socket area. */
88 sp = sctp_sk((struct sock *)sk); 88 sp = sctp_sk((struct sock *)sk);
89 89
90 /* Init all variables to a known value. */
91 memset(asoc, 0, sizeof(struct sctp_association));
92
93 /* Discarding const is appropriate here. */ 90 /* Discarding const is appropriate here. */
94 asoc->ep = (struct sctp_endpoint *)ep; 91 asoc->ep = (struct sctp_endpoint *)ep;
95 sctp_endpoint_hold(asoc->ep); 92 sctp_endpoint_hold(asoc->ep);
@@ -762,7 +759,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
762 asoc->peer.retran_path = peer; 759 asoc->peer.retran_path = peer;
763 } 760 }
764 761
765 if (asoc->peer.active_path == asoc->peer.retran_path) { 762 if (asoc->peer.active_path == asoc->peer.retran_path &&
763 peer->state != SCTP_UNCONFIRMED) {
766 asoc->peer.retran_path = peer; 764 asoc->peer.retran_path = peer;
767 } 765 }
768 766
@@ -1194,8 +1192,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
1194 /* Remove any peer addresses not present in the new association. */ 1192 /* Remove any peer addresses not present in the new association. */
1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 1193 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1196 trans = list_entry(pos, struct sctp_transport, transports); 1194 trans = list_entry(pos, struct sctp_transport, transports);
1197 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) 1195 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1198 sctp_assoc_del_peer(asoc, &trans->ipaddr); 1196 sctp_assoc_rm_peer(asoc, trans);
1197 continue;
1198 }
1199 1199
1200 if (asoc->state >= SCTP_STATE_ESTABLISHED) 1200 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1201 sctp_transport_reset(trans); 1201 sctp_transport_reset(trans);
@@ -1318,12 +1318,13 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1318 /* Keep track of the next transport in case 1318 /* Keep track of the next transport in case
1319 * we don't find any active transport. 1319 * we don't find any active transport.
1320 */ 1320 */
1321 if (!next) 1321 if (t->state != SCTP_UNCONFIRMED && !next)
1322 next = t; 1322 next = t;
1323 } 1323 }
1324 } 1324 }
1325 1325
1326 asoc->peer.retran_path = t; 1326 if (t)
1327 asoc->peer.retran_path = t;
1327 1328
1328 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" 1329 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1329 " %p addr: ", 1330 " %p addr: ",
@@ -1483,7 +1484,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1483 if (asoc->rwnd >= len) { 1484 if (asoc->rwnd >= len) {
1484 asoc->rwnd -= len; 1485 asoc->rwnd -= len;
1485 if (over) { 1486 if (over) {
1486 asoc->rwnd_press = asoc->rwnd; 1487 asoc->rwnd_press += asoc->rwnd;
1487 asoc->rwnd = 0; 1488 asoc->rwnd = 0;
1488 } 1489 }
1489 } else { 1490 } else {
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 3eab6db59a37..476caaf100ed 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -58,9 +58,9 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
58 msg->send_failed = 0; 58 msg->send_failed = 0;
59 msg->send_error = 0; 59 msg->send_error = 0;
60 msg->can_abandon = 0; 60 msg->can_abandon = 0;
61 msg->can_delay = 1;
61 msg->expires_at = 0; 62 msg->expires_at = 0;
62 INIT_LIST_HEAD(&msg->chunks); 63 INIT_LIST_HEAD(&msg->chunks);
63 msg->msg_size = 0;
64} 64}
65 65
66/* Allocate and initialize datamsg. */ 66/* Allocate and initialize datamsg. */
@@ -157,7 +157,6 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
157{ 157{
158 sctp_datamsg_hold(msg); 158 sctp_datamsg_hold(msg);
159 chunk->msg = msg; 159 chunk->msg = msg;
160 msg->msg_size += chunk->skb->len;
161} 160}
162 161
163 162
@@ -247,6 +246,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
247 if (msg_len >= first_len) { 246 if (msg_len >= first_len) {
248 msg_len -= first_len; 247 msg_len -= first_len;
249 whole = 1; 248 whole = 1;
249 msg->can_delay = 0;
250 } 250 }
251 251
252 /* How many full sized? How many bytes leftover? */ 252 /* How many full sized? How many bytes leftover? */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 905fda582b92..e10acc01c75f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -70,8 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
70 struct sctp_shared_key *null_key; 70 struct sctp_shared_key *null_key;
71 int err; 71 int err;
72 72
73 memset(ep, 0, sizeof(struct sctp_endpoint));
74
75 ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); 73 ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
76 if (!ep->digest) 74 if (!ep->digest)
77 return NULL; 75 return NULL;
@@ -144,6 +142,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
144 /* Use SCTP specific send buffer space queues. */ 142 /* Use SCTP specific send buffer space queues. */
145 ep->sndbuf_policy = sctp_sndbuf_policy; 143 ep->sndbuf_policy = sctp_sndbuf_policy;
146 144
145 sk->sk_data_ready = sctp_data_ready;
147 sk->sk_write_space = sctp_write_space; 146 sk->sk_write_space = sctp_write_space;
148 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 147 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
149 148
diff --git a/net/sctp/output.c b/net/sctp/output.c
index fad261d41ec2..a646681f5acd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -429,24 +429,17 @@ int sctp_packet_transmit(struct sctp_packet *packet)
429 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { 429 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
430 list_del_init(&chunk->list); 430 list_del_init(&chunk->list);
431 if (sctp_chunk_is_data(chunk)) { 431 if (sctp_chunk_is_data(chunk)) {
432 /* 6.3.1 C4) When data is in flight and when allowed
433 * by rule C5, a new RTT measurement MUST be made each
434 * round trip. Furthermore, new RTT measurements
435 * SHOULD be made no more than once per round-trip
436 * for a given destination transport address.
437 */
432 438
433 if (!chunk->resent) { 439 if (!tp->rto_pending) {
434 440 chunk->rtt_in_progress = 1;
435 /* 6.3.1 C4) When data is in flight and when allowed 441 tp->rto_pending = 1;
436 * by rule C5, a new RTT measurement MUST be made each
437 * round trip. Furthermore, new RTT measurements
438 * SHOULD be made no more than once per round-trip
439 * for a given destination transport address.
440 */
441
442 if (!tp->rto_pending) {
443 chunk->rtt_in_progress = 1;
444 tp->rto_pending = 1;
445 }
446 } 442 }
447
448 chunk->resent = 1;
449
450 has_data = 1; 443 has_data = 1;
451 } 444 }
452 445
@@ -681,7 +674,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
681 * Don't delay large message writes that may have been 674 * Don't delay large message writes that may have been
682 * fragmeneted into small peices. 675 * fragmeneted into small peices.
683 */ 676 */
684 if ((len < max) && (chunk->msg->msg_size < max)) { 677 if ((len < max) && chunk->msg->can_delay) {
685 retval = SCTP_XMIT_NAGLE_DELAY; 678 retval = SCTP_XMIT_NAGLE_DELAY;
686 goto finish; 679 goto finish;
687 } 680 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index abfc0b8dee74..5d057178ce0c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -62,7 +62,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
62 struct list_head *transmitted_queue, 62 struct list_head *transmitted_queue,
63 struct sctp_transport *transport, 63 struct sctp_transport *transport,
64 struct sctp_sackhdr *sack, 64 struct sctp_sackhdr *sack,
65 __u32 highest_new_tsn); 65 __u32 *highest_new_tsn);
66 66
67static void sctp_mark_missing(struct sctp_outq *q, 67static void sctp_mark_missing(struct sctp_outq *q,
68 struct list_head *transmitted_queue, 68 struct list_head *transmitted_queue,
@@ -308,7 +308,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
308 /* If it is data, queue it up, otherwise, send it 308 /* If it is data, queue it up, otherwise, send it
309 * immediately. 309 * immediately.
310 */ 310 */
311 if (SCTP_CID_DATA == chunk->chunk_hdr->type) { 311 if (sctp_chunk_is_data(chunk)) {
312 /* Is it OK to queue data chunks? */ 312 /* Is it OK to queue data chunks? */
313 /* From 9. Termination of Association 313 /* From 9. Termination of Association
314 * 314 *
@@ -598,11 +598,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
598 if (fast_rtx && !chunk->fast_retransmit) 598 if (fast_rtx && !chunk->fast_retransmit)
599 continue; 599 continue;
600 600
601redo:
601 /* Attempt to append this chunk to the packet. */ 602 /* Attempt to append this chunk to the packet. */
602 status = sctp_packet_append_chunk(pkt, chunk); 603 status = sctp_packet_append_chunk(pkt, chunk);
603 604
604 switch (status) { 605 switch (status) {
605 case SCTP_XMIT_PMTU_FULL: 606 case SCTP_XMIT_PMTU_FULL:
607 if (!pkt->has_data && !pkt->has_cookie_echo) {
608 /* If this packet did not contain DATA then
609 * retransmission did not happen, so do it
610 * again. We'll ignore the error here since
611 * control chunks are already freed so there
612 * is nothing we can do.
613 */
614 sctp_packet_transmit(pkt);
615 goto redo;
616 }
617
606 /* Send this packet. */ 618 /* Send this packet. */
607 error = sctp_packet_transmit(pkt); 619 error = sctp_packet_transmit(pkt);
608 620
@@ -647,14 +659,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
647 if (chunk->fast_retransmit == SCTP_NEED_FRTX) 659 if (chunk->fast_retransmit == SCTP_NEED_FRTX)
648 chunk->fast_retransmit = SCTP_DONT_FRTX; 660 chunk->fast_retransmit = SCTP_DONT_FRTX;
649 661
650 /* Force start T3-rtx timer when fast retransmitting
651 * the earliest outstanding TSN
652 */
653 if (!timer && fast_rtx &&
654 ntohl(chunk->subh.data_hdr->tsn) ==
655 asoc->ctsn_ack_point + 1)
656 timer = 2;
657
658 q->empty = 0; 662 q->empty = 0;
659 break; 663 break;
660 } 664 }
@@ -854,6 +858,12 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
854 if (status != SCTP_XMIT_OK) { 858 if (status != SCTP_XMIT_OK) {
855 /* put the chunk back */ 859 /* put the chunk back */
856 list_add(&chunk->list, &q->control_chunk_list); 860 list_add(&chunk->list, &q->control_chunk_list);
861 } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
862 /* PR-SCTP C5) If a FORWARD TSN is sent, the
863 * sender MUST assure that at least one T3-rtx
864 * timer is running.
865 */
866 sctp_transport_reset_timers(transport);
857 } 867 }
858 break; 868 break;
859 869
@@ -906,8 +916,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
906 rtx_timeout, &start_timer); 916 rtx_timeout, &start_timer);
907 917
908 if (start_timer) 918 if (start_timer)
909 sctp_transport_reset_timers(transport, 919 sctp_transport_reset_timers(transport);
910 start_timer-1);
911 920
912 /* This can happen on COOKIE-ECHO resend. Only 921 /* This can happen on COOKIE-ECHO resend. Only
913 * one chunk can get bundled with a COOKIE-ECHO. 922 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1040,7 +1049,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
1040 list_add_tail(&chunk->transmitted_list, 1049 list_add_tail(&chunk->transmitted_list,
1041 &transport->transmitted); 1050 &transport->transmitted);
1042 1051
1043 sctp_transport_reset_timers(transport, 0); 1052 sctp_transport_reset_timers(transport);
1044 1053
1045 q->empty = 0; 1054 q->empty = 0;
1046 1055
@@ -1100,32 +1109,6 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1100 assoc->unack_data = unack_data; 1109 assoc->unack_data = unack_data;
1101} 1110}
1102 1111
1103/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
1104static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
1105 struct sctp_association *asoc)
1106{
1107 struct sctp_transport *transport;
1108 struct sctp_chunk *chunk;
1109 __u32 highest_new_tsn, tsn;
1110 struct list_head *transport_list = &asoc->peer.transport_addr_list;
1111
1112 highest_new_tsn = ntohl(sack->cum_tsn_ack);
1113
1114 list_for_each_entry(transport, transport_list, transports) {
1115 list_for_each_entry(chunk, &transport->transmitted,
1116 transmitted_list) {
1117 tsn = ntohl(chunk->subh.data_hdr->tsn);
1118
1119 if (!chunk->tsn_gap_acked &&
1120 TSN_lt(highest_new_tsn, tsn) &&
1121 sctp_acked(sack, tsn))
1122 highest_new_tsn = tsn;
1123 }
1124 }
1125
1126 return highest_new_tsn;
1127}
1128
1129/* This is where we REALLY process a SACK. 1112/* This is where we REALLY process a SACK.
1130 * 1113 *
1131 * Process the SACK against the outqueue. Mostly, this just frees 1114 * Process the SACK against the outqueue. Mostly, this just frees
@@ -1145,6 +1128,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1145 struct sctp_transport *primary = asoc->peer.primary_path; 1128 struct sctp_transport *primary = asoc->peer.primary_path;
1146 int count_of_newacks = 0; 1129 int count_of_newacks = 0;
1147 int gap_ack_blocks; 1130 int gap_ack_blocks;
1131 u8 accum_moved = 0;
1148 1132
1149 /* Grab the association's destination address list. */ 1133 /* Grab the association's destination address list. */
1150 transport_list = &asoc->peer.transport_addr_list; 1134 transport_list = &asoc->peer.transport_addr_list;
@@ -1193,18 +1177,15 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1193 if (gap_ack_blocks) 1177 if (gap_ack_blocks)
1194 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); 1178 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1195 1179
1196 if (TSN_lt(asoc->highest_sacked, highest_tsn)) { 1180 if (TSN_lt(asoc->highest_sacked, highest_tsn))
1197 highest_new_tsn = highest_tsn;
1198 asoc->highest_sacked = highest_tsn; 1181 asoc->highest_sacked = highest_tsn;
1199 } else {
1200 highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
1201 }
1202 1182
1183 highest_new_tsn = sack_ctsn;
1203 1184
1204 /* Run through the retransmit queue. Credit bytes received 1185 /* Run through the retransmit queue. Credit bytes received
1205 * and free those chunks that we can. 1186 * and free those chunks that we can.
1206 */ 1187 */
1207 sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn); 1188 sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);
1208 1189
1209 /* Run through the transmitted queue. 1190 /* Run through the transmitted queue.
1210 * Credit bytes received and free those chunks which we can. 1191 * Credit bytes received and free those chunks which we can.
@@ -1213,7 +1194,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1213 */ 1194 */
1214 list_for_each_entry(transport, transport_list, transports) { 1195 list_for_each_entry(transport, transport_list, transports) {
1215 sctp_check_transmitted(q, &transport->transmitted, 1196 sctp_check_transmitted(q, &transport->transmitted,
1216 transport, sack, highest_new_tsn); 1197 transport, sack, &highest_new_tsn);
1217 /* 1198 /*
1218 * SFR-CACC algorithm: 1199 * SFR-CACC algorithm:
1219 * C) Let count_of_newacks be the number of 1200 * C) Let count_of_newacks be the number of
@@ -1223,16 +1204,22 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1223 count_of_newacks ++; 1204 count_of_newacks ++;
1224 } 1205 }
1225 1206
1207 /* Move the Cumulative TSN Ack Point if appropriate. */
1208 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1209 asoc->ctsn_ack_point = sack_ctsn;
1210 accum_moved = 1;
1211 }
1212
1226 if (gap_ack_blocks) { 1213 if (gap_ack_blocks) {
1214
1215 if (asoc->fast_recovery && accum_moved)
1216 highest_new_tsn = highest_tsn;
1217
1227 list_for_each_entry(transport, transport_list, transports) 1218 list_for_each_entry(transport, transport_list, transports)
1228 sctp_mark_missing(q, &transport->transmitted, transport, 1219 sctp_mark_missing(q, &transport->transmitted, transport,
1229 highest_new_tsn, count_of_newacks); 1220 highest_new_tsn, count_of_newacks);
1230 } 1221 }
1231 1222
1232 /* Move the Cumulative TSN Ack Point if appropriate. */
1233 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
1234 asoc->ctsn_ack_point = sack_ctsn;
1235
1236 /* Update unack_data field in the assoc. */ 1223 /* Update unack_data field in the assoc. */
1237 sctp_sack_update_unack_data(asoc, sack); 1224 sctp_sack_update_unack_data(asoc, sack);
1238 1225
@@ -1315,7 +1302,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1315 struct list_head *transmitted_queue, 1302 struct list_head *transmitted_queue,
1316 struct sctp_transport *transport, 1303 struct sctp_transport *transport,
1317 struct sctp_sackhdr *sack, 1304 struct sctp_sackhdr *sack,
1318 __u32 highest_new_tsn_in_sack) 1305 __u32 *highest_new_tsn_in_sack)
1319{ 1306{
1320 struct list_head *lchunk; 1307 struct list_head *lchunk;
1321 struct sctp_chunk *tchunk; 1308 struct sctp_chunk *tchunk;
@@ -1387,7 +1374,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1387 * instance). 1374 * instance).
1388 */ 1375 */
1389 if (!tchunk->tsn_gap_acked && 1376 if (!tchunk->tsn_gap_acked &&
1390 !tchunk->resent &&
1391 tchunk->rtt_in_progress) { 1377 tchunk->rtt_in_progress) {
1392 tchunk->rtt_in_progress = 0; 1378 tchunk->rtt_in_progress = 0;
1393 rtt = jiffies - tchunk->sent_at; 1379 rtt = jiffies - tchunk->sent_at;
@@ -1404,6 +1390,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1404 */ 1390 */
1405 if (!tchunk->tsn_gap_acked) { 1391 if (!tchunk->tsn_gap_acked) {
1406 tchunk->tsn_gap_acked = 1; 1392 tchunk->tsn_gap_acked = 1;
1393 *highest_new_tsn_in_sack = tsn;
1407 bytes_acked += sctp_data_size(tchunk); 1394 bytes_acked += sctp_data_size(tchunk);
1408 if (!tchunk->transport) 1395 if (!tchunk->transport)
1409 migrate_bytes += sctp_data_size(tchunk); 1396 migrate_bytes += sctp_data_size(tchunk);
@@ -1677,7 +1664,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
1677 struct sctp_chunk *chunk; 1664 struct sctp_chunk *chunk;
1678 __u32 tsn; 1665 __u32 tsn;
1679 char do_fast_retransmit = 0; 1666 char do_fast_retransmit = 0;
1680 struct sctp_transport *primary = q->asoc->peer.primary_path; 1667 struct sctp_association *asoc = q->asoc;
1668 struct sctp_transport *primary = asoc->peer.primary_path;
1681 1669
1682 list_for_each_entry(chunk, transmitted_queue, transmitted_list) { 1670 list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1683 1671
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
new file mode 100644
index 000000000000..db3a42b8b349
--- /dev/null
+++ b/net/sctp/probe.c
@@ -0,0 +1,214 @@
1/*
2 * sctp_probe - Observe the SCTP flow with kprobes.
3 *
4 * The idea for this came from Werner Almesberger's umlsim
5 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
6 *
7 * Modified for SCTP from Stephen Hemminger's code
8 * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/kernel.h>
26#include <linux/kprobes.h>
27#include <linux/socket.h>
28#include <linux/sctp.h>
29#include <linux/proc_fs.h>
30#include <linux/vmalloc.h>
31#include <linux/module.h>
32#include <linux/kfifo.h>
33#include <linux/time.h>
34#include <net/net_namespace.h>
35
36#include <net/sctp/sctp.h>
37#include <net/sctp/sm.h>
38
39MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
40MODULE_DESCRIPTION("SCTP snooper");
41MODULE_LICENSE("GPL");
42
43static int port __read_mostly = 0;
44MODULE_PARM_DESC(port, "Port to match (0=all)");
45module_param(port, int, 0);
46
47static int bufsize __read_mostly = 64 * 1024;
48MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
49module_param(bufsize, int, 0);
50
51static int full __read_mostly = 1;
52MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
53module_param(full, int, 0);
54
55static const char procname[] = "sctpprobe";
56
57static struct {
58 struct kfifo fifo;
59 spinlock_t lock;
60 wait_queue_head_t wait;
61 struct timespec tstart;
62} sctpw;
63
64static void printl(const char *fmt, ...)
65{
66 va_list args;
67 int len;
68 char tbuf[256];
69
70 va_start(args, fmt);
71 len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
72 va_end(args);
73
74 kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
75 wake_up(&sctpw.wait);
76}
77
78static int sctpprobe_open(struct inode *inode, struct file *file)
79{
80 kfifo_reset(&sctpw.fifo);
81 getnstimeofday(&sctpw.tstart);
82
83 return 0;
84}
85
86static ssize_t sctpprobe_read(struct file *file, char __user *buf,
87 size_t len, loff_t *ppos)
88{
89 int error = 0, cnt = 0;
90 unsigned char *tbuf;
91
92 if (!buf)
93 return -EINVAL;
94
95 if (len == 0)
96 return 0;
97
98 tbuf = vmalloc(len);
99 if (!tbuf)
100 return -ENOMEM;
101
102 error = wait_event_interruptible(sctpw.wait,
103 kfifo_len(&sctpw.fifo) != 0);
104 if (error)
105 goto out_free;
106
107 cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
108 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
109
110out_free:
111 vfree(tbuf);
112
113 return error ? error : cnt;
114}
115
116static const struct file_operations sctpprobe_fops = {
117 .owner = THIS_MODULE,
118 .open = sctpprobe_open,
119 .read = sctpprobe_read,
120};
121
122sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep,
123 const struct sctp_association *asoc,
124 const sctp_subtype_t type,
125 void *arg,
126 sctp_cmd_seq_t *commands)
127{
128 struct sctp_transport *sp;
129 static __u32 lcwnd = 0;
130 struct timespec now;
131
132 sp = asoc->peer.primary_path;
133
134 if ((full || sp->cwnd != lcwnd) &&
135 (!port || asoc->peer.port == port ||
136 ep->base.bind_addr.port == port)) {
137 lcwnd = sp->cwnd;
138
139 getnstimeofday(&now);
140 now = timespec_sub(now, sctpw.tstart);
141
142 printl("%lu.%06lu ", (unsigned long) now.tv_sec,
143 (unsigned long) now.tv_nsec / NSEC_PER_USEC);
144
145 printl("%p %5d %5d %5d %8d %5d ", asoc,
146 ep->base.bind_addr.port, asoc->peer.port,
147 asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
148
149 list_for_each_entry(sp, &asoc->peer.transport_addr_list,
150 transports) {
151 if (sp == asoc->peer.primary_path)
152 printl("*");
153
154 if (sp->ipaddr.sa.sa_family == AF_INET)
155 printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
156 else
157 printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
158
159 printl("%2u %8u %8u %8u %8u %8u ",
160 sp->state, sp->cwnd, sp->ssthresh,
161 sp->flight_size, sp->partial_bytes_acked,
162 sp->pathmtu);
163 }
164 printl("\n");
165 }
166
167 jprobe_return();
168 return 0;
169}
170
171static struct jprobe sctp_recv_probe = {
172 .kp = {
173 .symbol_name = "sctp_sf_eat_sack_6_2",
174 },
175 .entry = jsctp_sf_eat_sack,
176};
177
178static __init int sctpprobe_init(void)
179{
180 int ret = -ENOMEM;
181
182 init_waitqueue_head(&sctpw.wait);
183 spin_lock_init(&sctpw.lock);
184 if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
185 return ret;
186
187 if (!proc_net_fops_create(&init_net, procname, S_IRUSR,
188 &sctpprobe_fops))
189 goto free_kfifo;
190
191 ret = register_jprobe(&sctp_recv_probe);
192 if (ret)
193 goto remove_proc;
194
195 pr_info("SCTP probe registered (port=%d)\n", port);
196
197 return 0;
198
199remove_proc:
200 proc_net_remove(&init_net, procname);
201free_kfifo:
202 kfifo_free(&sctpw.fifo);
203 return ret;
204}
205
206static __exit void sctpprobe_exit(void)
207{
208 kfifo_free(&sctpw.fifo);
209 proc_net_remove(&init_net, procname);
210 unregister_jprobe(&sctp_recv_probe);
211}
212
213module_init(sctpprobe_init);
214module_exit(sctpprobe_exit);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 704298f4b284..182749867c72 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -474,13 +474,17 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
474 474
475 memset(&fl, 0x0, sizeof(struct flowi)); 475 memset(&fl, 0x0, sizeof(struct flowi));
476 fl.fl4_dst = daddr->v4.sin_addr.s_addr; 476 fl.fl4_dst = daddr->v4.sin_addr.s_addr;
477 fl.fl_ip_dport = daddr->v4.sin_port;
477 fl.proto = IPPROTO_SCTP; 478 fl.proto = IPPROTO_SCTP;
478 if (asoc) { 479 if (asoc) {
479 fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk); 480 fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
480 fl.oif = asoc->base.sk->sk_bound_dev_if; 481 fl.oif = asoc->base.sk->sk_bound_dev_if;
482 fl.fl_ip_sport = htons(asoc->base.bind_addr.port);
481 } 483 }
482 if (saddr) 484 if (saddr) {
483 fl.fl4_src = saddr->v4.sin_addr.s_addr; 485 fl.fl4_src = saddr->v4.sin_addr.s_addr;
486 fl.fl_ip_sport = saddr->v4.sin_port;
487 }
484 488
485 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 489 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
486 __func__, &fl.fl4_dst, &fl.fl4_src); 490 __func__, &fl.fl4_dst, &fl.fl4_src);
@@ -528,6 +532,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
528 if ((laddr->state == SCTP_ADDR_SRC) && 532 if ((laddr->state == SCTP_ADDR_SRC) &&
529 (AF_INET == laddr->a.sa.sa_family)) { 533 (AF_INET == laddr->a.sa.sa_family)) {
530 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 534 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
535 fl.fl_ip_sport = laddr->a.v4.sin_port;
531 if (!ip_route_output_key(&init_net, &rt, &fl)) { 536 if (!ip_route_output_key(&init_net, &rt, &fl)) {
532 dst = &rt->u.dst; 537 dst = &rt->u.dst;
533 goto out_unlock; 538 goto out_unlock;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 17cb400ecd6a..d8261f3d7715 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
108 cpu_to_be16(sizeof(struct sctp_paramhdr)), 108 cpu_to_be16(sizeof(struct sctp_paramhdr)),
109}; 109};
110 110
111/* A helper to initialize to initialize an op error inside a 111/* A helper to initialize an op error inside a
112 * provided chunk, as most cause codes will be embedded inside an 112 * provided chunk, as most cause codes will be embedded inside an
113 * abort chunk. 113 * abort chunk.
114 */ 114 */
@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
125 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); 125 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
126} 126}
127 127
128/* A helper to initialize an op error inside a
129 * provided chunk, as most cause codes will be embedded inside an
130 * abort chunk. Differs from sctp_init_cause in that it won't oops
131 * if there isn't enough space in the op error chunk
132 */
133int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
134 size_t paylen)
135{
136 sctp_errhdr_t err;
137 __u16 len;
138
139 /* Cause code constants are now defined in network order. */
140 err.cause = cause_code;
141 len = sizeof(sctp_errhdr_t) + paylen;
142 err.length = htons(len);
143
144 if (skb_tailroom(chunk->skb) > len)
145 return -ENOSPC;
146 chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
147 sizeof(sctp_errhdr_t),
148 &err);
149 return 0;
150}
128/* 3.3.2 Initiation (INIT) (1) 151/* 3.3.2 Initiation (INIT) (1)
129 * 152 *
130 * This chunk is used to initiate a SCTP association between two 153 * This chunk is used to initiate a SCTP association between two
@@ -208,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
208 sp = sctp_sk(asoc->base.sk); 231 sp = sctp_sk(asoc->base.sk);
209 num_types = sp->pf->supported_addrs(sp, types); 232 num_types = sp->pf->supported_addrs(sp, types);
210 233
211 chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); 234 chunksize = sizeof(init) + addrs_len;
235 chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
212 chunksize += sizeof(ecap_param); 236 chunksize += sizeof(ecap_param);
213 237
214 if (sctp_prsctp_enable) 238 if (sctp_prsctp_enable)
@@ -238,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
238 /* Add HMACS parameter length if any were defined */ 262 /* Add HMACS parameter length if any were defined */
239 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 263 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
240 if (auth_hmacs->length) 264 if (auth_hmacs->length)
241 chunksize += ntohs(auth_hmacs->length); 265 chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
242 else 266 else
243 auth_hmacs = NULL; 267 auth_hmacs = NULL;
244 268
245 /* Add CHUNKS parameter length */ 269 /* Add CHUNKS parameter length */
246 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 270 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
247 if (auth_chunks->length) 271 if (auth_chunks->length)
248 chunksize += ntohs(auth_chunks->length); 272 chunksize += WORD_ROUND(ntohs(auth_chunks->length));
249 else 273 else
250 auth_chunks = NULL; 274 auth_chunks = NULL;
251 275
@@ -255,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
255 279
256 /* If we have any extensions to report, account for that */ 280 /* If we have any extensions to report, account for that */
257 if (num_ext) 281 if (num_ext)
258 chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 282 chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
283 num_ext);
259 284
260 /* RFC 2960 3.3.2 Initiation (INIT) (1) 285 /* RFC 2960 3.3.2 Initiation (INIT) (1)
261 * 286 *
@@ -397,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
397 422
398 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 423 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
399 if (auth_hmacs->length) 424 if (auth_hmacs->length)
400 chunksize += ntohs(auth_hmacs->length); 425 chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
401 else 426 else
402 auth_hmacs = NULL; 427 auth_hmacs = NULL;
403 428
404 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 429 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
405 if (auth_chunks->length) 430 if (auth_chunks->length)
406 chunksize += ntohs(auth_chunks->length); 431 chunksize += WORD_ROUND(ntohs(auth_chunks->length));
407 else 432 else
408 auth_chunks = NULL; 433 auth_chunks = NULL;
409 434
@@ -412,17 +437,25 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
412 } 437 }
413 438
414 if (num_ext) 439 if (num_ext)
415 chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 440 chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
441 num_ext);
416 442
417 /* Now allocate and fill out the chunk. */ 443 /* Now allocate and fill out the chunk. */
418 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); 444 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
419 if (!retval) 445 if (!retval)
420 goto nomem_chunk; 446 goto nomem_chunk;
421 447
422 /* Per the advice in RFC 2960 6.4, send this reply to 448 /* RFC 2960 6.4 Multi-homed SCTP Endpoints
423 * the source of the INIT packet. 449 *
450 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
451 * HEARTBEAT ACK, * etc.) to the same destination transport
452 * address from which it received the DATA or control chunk
453 * to which it is replying.
454 *
455 * [INIT ACK back to where the INIT came from.]
424 */ 456 */
425 retval->transport = chunk->transport; 457 retval->transport = chunk->transport;
458
426 retval->subh.init_hdr = 459 retval->subh.init_hdr =
427 sctp_addto_chunk(retval, sizeof(initack), &initack); 460 sctp_addto_chunk(retval, sizeof(initack), &initack);
428 retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); 461 retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v);
@@ -461,18 +494,6 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
461 /* We need to remove the const qualifier at this point. */ 494 /* We need to remove the const qualifier at this point. */
462 retval->asoc = (struct sctp_association *) asoc; 495 retval->asoc = (struct sctp_association *) asoc;
463 496
464 /* RFC 2960 6.4 Multi-homed SCTP Endpoints
465 *
466 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
467 * HEARTBEAT ACK, * etc.) to the same destination transport
468 * address from which it received the DATA or control chunk
469 * to which it is replying.
470 *
471 * [INIT ACK back to where the INIT came from.]
472 */
473 if (chunk)
474 retval->transport = chunk->transport;
475
476nomem_chunk: 497nomem_chunk:
477 kfree(cookie); 498 kfree(cookie);
478nomem_cookie: 499nomem_cookie:
@@ -1129,6 +1150,24 @@ nodata:
1129 return retval; 1150 return retval;
1130} 1151}
1131 1152
1153/* Create an Operation Error chunk of a fixed size,
1154 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
1155 * This is a helper function to allocate an error chunk for
1156 * for those invalid parameter codes in which we may not want
1157 * to report all the errors, if the incomming chunk is large
1158 */
1159static inline struct sctp_chunk *sctp_make_op_error_fixed(
1160 const struct sctp_association *asoc,
1161 const struct sctp_chunk *chunk)
1162{
1163 size_t size = asoc ? asoc->pathmtu : 0;
1164
1165 if (!size)
1166 size = SCTP_DEFAULT_MAXSEGMENT;
1167
1168 return sctp_make_op_error_space(asoc, chunk, size);
1169}
1170
1132/* Create an Operation Error chunk. */ 1171/* Create an Operation Error chunk. */
1133struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, 1172struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
1134 const struct sctp_chunk *chunk, 1173 const struct sctp_chunk *chunk,
@@ -1210,7 +1249,6 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
1210 INIT_LIST_HEAD(&retval->list); 1249 INIT_LIST_HEAD(&retval->list);
1211 retval->skb = skb; 1250 retval->skb = skb;
1212 retval->asoc = (struct sctp_association *)asoc; 1251 retval->asoc = (struct sctp_association *)asoc;
1213 retval->resent = 0;
1214 retval->has_tsn = 0; 1252 retval->has_tsn = 0;
1215 retval->has_ssn = 0; 1253 retval->has_ssn = 0;
1216 retval->rtt_in_progress = 0; 1254 retval->rtt_in_progress = 0;
@@ -1371,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
1371 return target; 1409 return target;
1372} 1410}
1373 1411
1412/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
1413 * space in the chunk
1414 */
1415void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
1416 int len, const void *data)
1417{
1418 if (skb_tailroom(chunk->skb) > len)
1419 return sctp_addto_chunk(chunk, len, data);
1420 else
1421 return NULL;
1422}
1423
1374/* Append bytes from user space to the end of a chunk. Will panic if 1424/* Append bytes from user space to the end of a chunk. Will panic if
1375 * chunk is not big enough. 1425 * chunk is not big enough.
1376 * Returns a kernel err value. 1426 * Returns a kernel err value.
@@ -1974,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
1974 * returning multiple unknown parameters. 2024 * returning multiple unknown parameters.
1975 */ 2025 */
1976 if (NULL == *errp) 2026 if (NULL == *errp)
1977 *errp = sctp_make_op_error_space(asoc, chunk, 2027 *errp = sctp_make_op_error_fixed(asoc, chunk);
1978 ntohs(chunk->chunk_hdr->length));
1979 2028
1980 if (*errp) { 2029 if (*errp) {
1981 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, 2030 sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
1982 WORD_ROUND(ntohs(param.p->length))); 2031 WORD_ROUND(ntohs(param.p->length)));
1983 sctp_addto_chunk(*errp, 2032 sctp_addto_chunk_fixed(*errp,
1984 WORD_ROUND(ntohs(param.p->length)), 2033 WORD_ROUND(ntohs(param.p->length)),
1985 param.v); 2034 param.v);
1986 } else { 2035 } else {
@@ -3315,21 +3364,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3315 sctp_chunk_free(asconf); 3364 sctp_chunk_free(asconf);
3316 asoc->addip_last_asconf = NULL; 3365 asoc->addip_last_asconf = NULL;
3317 3366
3318 /* Send the next asconf chunk from the addip chunk queue. */
3319 if (!list_empty(&asoc->addip_chunk_list)) {
3320 struct list_head *entry = asoc->addip_chunk_list.next;
3321 asconf = list_entry(entry, struct sctp_chunk, list);
3322
3323 list_del_init(entry);
3324
3325 /* Hold the chunk until an ASCONF_ACK is received. */
3326 sctp_chunk_hold(asconf);
3327 if (sctp_primitive_ASCONF(asoc, asconf))
3328 sctp_chunk_free(asconf);
3329 else
3330 asoc->addip_last_asconf = asconf;
3331 }
3332
3333 return retval; 3367 return retval;
3334} 3368}
3335 3369
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 4c5bed9af4e3..3b7230ef77c2 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -697,11 +697,15 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
697{ 697{
698 struct sctp_transport *t; 698 struct sctp_transport *t;
699 699
700 t = sctp_assoc_choose_alter_transport(asoc, 700 if (chunk->transport)
701 t = chunk->transport;
702 else {
703 t = sctp_assoc_choose_alter_transport(asoc,
701 asoc->shutdown_last_sent_to); 704 asoc->shutdown_last_sent_to);
705 chunk->transport = t;
706 }
702 asoc->shutdown_last_sent_to = t; 707 asoc->shutdown_last_sent_to = t;
703 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; 708 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
704 chunk->transport = t;
705} 709}
706 710
707/* Helper function to change the state of an association. */ 711/* Helper function to change the state of an association. */
@@ -962,6 +966,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
962} 966}
963 967
964 968
969/* Sent the next ASCONF packet currently stored in the association.
970 * This happens after the ASCONF_ACK was succeffully processed.
971 */
972static void sctp_cmd_send_asconf(struct sctp_association *asoc)
973{
974 /* Send the next asconf chunk from the addip chunk
975 * queue.
976 */
977 if (!list_empty(&asoc->addip_chunk_list)) {
978 struct list_head *entry = asoc->addip_chunk_list.next;
979 struct sctp_chunk *asconf = list_entry(entry,
980 struct sctp_chunk, list);
981 list_del_init(entry);
982
983 /* Hold the chunk until an ASCONF_ACK is received. */
984 sctp_chunk_hold(asconf);
985 if (sctp_primitive_ASCONF(asoc, asconf))
986 sctp_chunk_free(asconf);
987 else
988 asoc->addip_last_asconf = asconf;
989 }
990}
991
965 992
966/* These three macros allow us to pull the debugging code out of the 993/* These three macros allow us to pull the debugging code out of the
967 * main flow of sctp_do_sm() to keep attention focused on the real 994 * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1617,6 +1644,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1617 } 1644 }
1618 error = sctp_cmd_send_msg(asoc, cmd->obj.msg); 1645 error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1619 break; 1646 break;
1647 case SCTP_CMD_SEND_NEXT_ASCONF:
1648 sctp_cmd_send_asconf(asoc);
1649 break;
1620 default: 1650 default:
1621 printk(KERN_WARNING "Impossible command: %u, %p\n", 1651 printk(KERN_WARNING "Impossible command: %u, %p\n",
1622 cmd->verb, cmd->obj.ptr); 1652 cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index abf601a1b847..24b2cd555637 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3676 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3676 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3677 3677
3678 if (!sctp_process_asconf_ack((struct sctp_association *)asoc, 3678 if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
3679 asconf_ack)) 3679 asconf_ack)) {
3680 /* Successfully processed ASCONF_ACK. We can
3681 * release the next asconf if we have one.
3682 */
3683 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
3684 SCTP_NULL());
3680 return SCTP_DISPOSITION_CONSUME; 3685 return SCTP_DISPOSITION_CONSUME;
3686 }
3681 3687
3682 abort = sctp_make_abort(asoc, asconf_ack, 3688 abort = sctp_make_abort(asoc, asconf_ack,
3683 sizeof(sctp_errhdr_t)); 3689 sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c1941276f6e3..ba1add0b13c3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3719,12 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3719 sp->hmac = NULL; 3719 sp->hmac = NULL;
3720 3720
3721 SCTP_DBG_OBJCNT_INC(sock); 3721 SCTP_DBG_OBJCNT_INC(sock);
3722 percpu_counter_inc(&sctp_sockets_allocated);
3723
3724 /* Set socket backlog limit. */
3725 sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3726 3722
3727 local_bh_disable(); 3723 local_bh_disable();
3724 percpu_counter_inc(&sctp_sockets_allocated);
3728 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3725 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3729 local_bh_enable(); 3726 local_bh_enable();
3730 3727
@@ -3741,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3741 /* Release our hold on the endpoint. */ 3738 /* Release our hold on the endpoint. */
3742 ep = sctp_sk(sk)->ep; 3739 ep = sctp_sk(sk)->ep;
3743 sctp_endpoint_free(ep); 3740 sctp_endpoint_free(ep);
3744 percpu_counter_dec(&sctp_sockets_allocated);
3745 local_bh_disable(); 3741 local_bh_disable();
3742 percpu_counter_dec(&sctp_sockets_allocated);
3746 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3743 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3747 local_bh_enable(); 3744 local_bh_enable();
3748} 3745}
@@ -4387,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4387 transports) { 4384 transports) {
4388 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4385 memcpy(&temp, &from->ipaddr, sizeof(temp));
4389 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4386 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4390 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; 4387 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4391 if (space_left < addrlen) 4388 if (space_left < addrlen)
4392 return -ENOMEM; 4389 return -ENOMEM;
4393 if (copy_to_user(to, &temp, addrlen)) 4390 if (copy_to_user(to, &temp, addrlen))
@@ -5702,7 +5699,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
5702 struct sctp_sock *sp = sctp_sk(sk); 5699 struct sctp_sock *sp = sctp_sk(sk);
5703 unsigned int mask; 5700 unsigned int mask;
5704 5701
5705 poll_wait(file, sk->sk_sleep, wait); 5702 poll_wait(file, sk_sleep(sk), wait);
5706 5703
5707 /* A TCP-style listening socket becomes readable when the accept queue 5704 /* A TCP-style listening socket becomes readable when the accept queue
5708 * is not empty. 5705 * is not empty.
@@ -5943,7 +5940,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
5943 int error; 5940 int error;
5944 DEFINE_WAIT(wait); 5941 DEFINE_WAIT(wait);
5945 5942
5946 prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 5943 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
5947 5944
5948 /* Socket errors? */ 5945 /* Socket errors? */
5949 error = sock_error(sk); 5946 error = sock_error(sk);
@@ -5980,14 +5977,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
5980 sctp_lock_sock(sk); 5977 sctp_lock_sock(sk);
5981 5978
5982ready: 5979ready:
5983 finish_wait(sk->sk_sleep, &wait); 5980 finish_wait(sk_sleep(sk), &wait);
5984 return 0; 5981 return 0;
5985 5982
5986interrupted: 5983interrupted:
5987 error = sock_intr_errno(*timeo_p); 5984 error = sock_intr_errno(*timeo_p);
5988 5985
5989out: 5986out:
5990 finish_wait(sk->sk_sleep, &wait); 5987 finish_wait(sk_sleep(sk), &wait);
5991 *err = error; 5988 *err = error;
5992 return error; 5989 return error;
5993} 5990}
@@ -6061,14 +6058,14 @@ static void __sctp_write_space(struct sctp_association *asoc)
6061 wake_up_interruptible(&asoc->wait); 6058 wake_up_interruptible(&asoc->wait);
6062 6059
6063 if (sctp_writeable(sk)) { 6060 if (sctp_writeable(sk)) {
6064 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 6061 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
6065 wake_up_interruptible(sk->sk_sleep); 6062 wake_up_interruptible(sk_sleep(sk));
6066 6063
6067 /* Note that we try to include the Async I/O support 6064 /* Note that we try to include the Async I/O support
6068 * here by modeling from the current TCP/UDP code. 6065 * here by modeling from the current TCP/UDP code.
6069 * We have not tested with it yet. 6066 * We have not tested with it yet.
6070 */ 6067 */
6071 if (sock->fasync_list && 6068 if (sock->wq->fasync_list &&
6072 !(sk->sk_shutdown & SEND_SHUTDOWN)) 6069 !(sk->sk_shutdown & SEND_SHUTDOWN))
6073 sock_wake_async(sock, 6070 sock_wake_async(sock,
6074 SOCK_WAKE_SPACE, POLL_OUT); 6071 SOCK_WAKE_SPACE, POLL_OUT);
@@ -6188,6 +6185,19 @@ do_nonblock:
6188 goto out; 6185 goto out;
6189} 6186}
6190 6187
6188void sctp_data_ready(struct sock *sk, int len)
6189{
6190 struct socket_wq *wq;
6191
6192 rcu_read_lock();
6193 wq = rcu_dereference(sk->sk_wq);
6194 if (wq_has_sleeper(wq))
6195 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
6196 POLLRDNORM | POLLRDBAND);
6197 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6198 rcu_read_unlock();
6199}
6200
6191/* If socket sndbuf has changed, wake up all per association waiters. */ 6201/* If socket sndbuf has changed, wake up all per association waiters. */
6192void sctp_write_space(struct sock *sk) 6202void sctp_write_space(struct sock *sk)
6193{ 6203{
@@ -6296,7 +6306,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
6296 6306
6297 6307
6298 for (;;) { 6308 for (;;) {
6299 prepare_to_wait_exclusive(sk->sk_sleep, &wait, 6309 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
6300 TASK_INTERRUPTIBLE); 6310 TASK_INTERRUPTIBLE);
6301 6311
6302 if (list_empty(&ep->asocs)) { 6312 if (list_empty(&ep->asocs)) {
@@ -6322,7 +6332,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
6322 break; 6332 break;
6323 } 6333 }
6324 6334
6325 finish_wait(sk->sk_sleep, &wait); 6335 finish_wait(sk_sleep(sk), &wait);
6326 6336
6327 return err; 6337 return err;
6328} 6338}
@@ -6332,7 +6342,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
6332 DEFINE_WAIT(wait); 6342 DEFINE_WAIT(wait);
6333 6343
6334 do { 6344 do {
6335 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 6345 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6336 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6346 if (list_empty(&sctp_sk(sk)->ep->asocs))
6337 break; 6347 break;
6338 sctp_release_sock(sk); 6348 sctp_release_sock(sk);
@@ -6340,7 +6350,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
6340 sctp_lock_sock(sk); 6350 sctp_lock_sock(sk);
6341 } while (!signal_pending(current) && timeout); 6351 } while (!signal_pending(current) && timeout);
6342 6352
6343 finish_wait(sk->sk_sleep, &wait); 6353 finish_wait(sk_sleep(sk), &wait);
6344} 6354}
6345 6355
6346static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6356static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index be4d63d5a5cc..fccf4947aff1 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -64,9 +64,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
64 /* Copy in the address. */ 64 /* Copy in the address. */
65 peer->ipaddr = *addr; 65 peer->ipaddr = *addr;
66 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); 66 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
67 peer->asoc = NULL;
68
69 peer->dst = NULL;
70 memset(&peer->saddr, 0, sizeof(union sctp_addr)); 67 memset(&peer->saddr, 0, sizeof(union sctp_addr));
71 68
72 /* From 6.3.1 RTO Calculation: 69 /* From 6.3.1 RTO Calculation:
@@ -76,34 +73,21 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
76 * parameter 'RTO.Initial'. 73 * parameter 'RTO.Initial'.
77 */ 74 */
78 peer->rto = msecs_to_jiffies(sctp_rto_initial); 75 peer->rto = msecs_to_jiffies(sctp_rto_initial);
79 peer->rtt = 0;
80 peer->rttvar = 0;
81 peer->srtt = 0;
82 peer->rto_pending = 0;
83 peer->hb_sent = 0;
84 peer->fast_recovery = 0;
85 76
86 peer->last_time_heard = jiffies; 77 peer->last_time_heard = jiffies;
87 peer->last_time_ecne_reduced = jiffies; 78 peer->last_time_ecne_reduced = jiffies;
88 79
89 peer->init_sent_count = 0;
90
91 peer->param_flags = SPP_HB_DISABLE | 80 peer->param_flags = SPP_HB_DISABLE |
92 SPP_PMTUD_ENABLE | 81 SPP_PMTUD_ENABLE |
93 SPP_SACKDELAY_ENABLE; 82 SPP_SACKDELAY_ENABLE;
94 peer->hbinterval = 0;
95 83
96 /* Initialize the default path max_retrans. */ 84 /* Initialize the default path max_retrans. */
97 peer->pathmaxrxt = sctp_max_retrans_path; 85 peer->pathmaxrxt = sctp_max_retrans_path;
98 peer->error_count = 0;
99 86
100 INIT_LIST_HEAD(&peer->transmitted); 87 INIT_LIST_HEAD(&peer->transmitted);
101 INIT_LIST_HEAD(&peer->send_ready); 88 INIT_LIST_HEAD(&peer->send_ready);
102 INIT_LIST_HEAD(&peer->transports); 89 INIT_LIST_HEAD(&peer->transports);
103 90
104 peer->T3_rtx_timer.expires = 0;
105 peer->hb_timer.expires = 0;
106
107 setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 91 setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
108 (unsigned long)peer); 92 (unsigned long)peer);
109 setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, 93 setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
@@ -113,15 +97,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
113 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 97 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
114 98
115 atomic_set(&peer->refcnt, 1); 99 atomic_set(&peer->refcnt, 1);
116 peer->dead = 0;
117
118 peer->malloced = 0;
119
120 /* Initialize the state information for SFR-CACC */
121 peer->cacc.changeover_active = 0;
122 peer->cacc.cycling_changeover = 0;
123 peer->cacc.next_tsn_at_change = 0;
124 peer->cacc.cacc_saw_newack = 0;
125 100
126 return peer; 101 return peer;
127} 102}
@@ -195,7 +170,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
195/* Start T3_rtx timer if it is not already running and update the heartbeat 170/* Start T3_rtx timer if it is not already running and update the heartbeat
196 * timer. This routine is called every time a DATA chunk is sent. 171 * timer. This routine is called every time a DATA chunk is sent.
197 */ 172 */
198void sctp_transport_reset_timers(struct sctp_transport *transport, int force) 173void sctp_transport_reset_timers(struct sctp_transport *transport)
199{ 174{
200 /* RFC 2960 6.3.2 Retransmission Timer Rules 175 /* RFC 2960 6.3.2 Retransmission Timer Rules
201 * 176 *
@@ -205,7 +180,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
205 * address. 180 * address.
206 */ 181 */
207 182
208 if (force || !timer_pending(&transport->T3_rtx_timer)) 183 if (!timer_pending(&transport->T3_rtx_timer))
209 if (!mod_timer(&transport->T3_rtx_timer, 184 if (!mod_timer(&transport->T3_rtx_timer,
210 jiffies + transport->rto)) 185 jiffies + transport->rto))
211 sctp_transport_hold(transport); 186 sctp_transport_hold(transport);
@@ -403,15 +378,16 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
403void sctp_transport_raise_cwnd(struct sctp_transport *transport, 378void sctp_transport_raise_cwnd(struct sctp_transport *transport,
404 __u32 sack_ctsn, __u32 bytes_acked) 379 __u32 sack_ctsn, __u32 bytes_acked)
405{ 380{
381 struct sctp_association *asoc = transport->asoc;
406 __u32 cwnd, ssthresh, flight_size, pba, pmtu; 382 __u32 cwnd, ssthresh, flight_size, pba, pmtu;
407 383
408 cwnd = transport->cwnd; 384 cwnd = transport->cwnd;
409 flight_size = transport->flight_size; 385 flight_size = transport->flight_size;
410 386
411 /* See if we need to exit Fast Recovery first */ 387 /* See if we need to exit Fast Recovery first */
412 if (transport->fast_recovery && 388 if (asoc->fast_recovery &&
413 TSN_lte(transport->fast_recovery_exit, sack_ctsn)) 389 TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
414 transport->fast_recovery = 0; 390 asoc->fast_recovery = 0;
415 391
416 /* The appropriate cwnd increase algorithm is performed if, and only 392 /* The appropriate cwnd increase algorithm is performed if, and only
417 * if the cumulative TSN whould advanced and the congestion window is 393 * if the cumulative TSN whould advanced and the congestion window is
@@ -440,7 +416,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
440 * 2) the destination's path MTU. This upper bound protects 416 * 2) the destination's path MTU. This upper bound protects
441 * against the ACK-Splitting attack outlined in [SAVAGE99]. 417 * against the ACK-Splitting attack outlined in [SAVAGE99].
442 */ 418 */
443 if (transport->fast_recovery) 419 if (asoc->fast_recovery)
444 return; 420 return;
445 421
446 if (bytes_acked > pmtu) 422 if (bytes_acked > pmtu)
@@ -491,6 +467,8 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
491void sctp_transport_lower_cwnd(struct sctp_transport *transport, 467void sctp_transport_lower_cwnd(struct sctp_transport *transport,
492 sctp_lower_cwnd_t reason) 468 sctp_lower_cwnd_t reason)
493{ 469{
470 struct sctp_association *asoc = transport->asoc;
471
494 switch (reason) { 472 switch (reason) {
495 case SCTP_LOWER_CWND_T3_RTX: 473 case SCTP_LOWER_CWND_T3_RTX:
496 /* RFC 2960 Section 7.2.3, sctpimpguide 474 /* RFC 2960 Section 7.2.3, sctpimpguide
@@ -501,11 +479,11 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
501 * partial_bytes_acked = 0 479 * partial_bytes_acked = 0
502 */ 480 */
503 transport->ssthresh = max(transport->cwnd/2, 481 transport->ssthresh = max(transport->cwnd/2,
504 4*transport->asoc->pathmtu); 482 4*asoc->pathmtu);
505 transport->cwnd = transport->asoc->pathmtu; 483 transport->cwnd = asoc->pathmtu;
506 484
507 /* T3-rtx also clears fast recovery on the transport */ 485 /* T3-rtx also clears fast recovery */
508 transport->fast_recovery = 0; 486 asoc->fast_recovery = 0;
509 break; 487 break;
510 488
511 case SCTP_LOWER_CWND_FAST_RTX: 489 case SCTP_LOWER_CWND_FAST_RTX:
@@ -521,15 +499,15 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
521 * cwnd = ssthresh 499 * cwnd = ssthresh
522 * partial_bytes_acked = 0 500 * partial_bytes_acked = 0
523 */ 501 */
524 if (transport->fast_recovery) 502 if (asoc->fast_recovery)
525 return; 503 return;
526 504
527 /* Mark Fast recovery */ 505 /* Mark Fast recovery */
528 transport->fast_recovery = 1; 506 asoc->fast_recovery = 1;
529 transport->fast_recovery_exit = transport->asoc->next_tsn - 1; 507 asoc->fast_recovery_exit = asoc->next_tsn - 1;
530 508
531 transport->ssthresh = max(transport->cwnd/2, 509 transport->ssthresh = max(transport->cwnd/2,
532 4*transport->asoc->pathmtu); 510 4*asoc->pathmtu);
533 transport->cwnd = transport->ssthresh; 511 transport->cwnd = transport->ssthresh;
534 break; 512 break;
535 513
@@ -549,7 +527,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
549 if (time_after(jiffies, transport->last_time_ecne_reduced + 527 if (time_after(jiffies, transport->last_time_ecne_reduced +
550 transport->rtt)) { 528 transport->rtt)) {
551 transport->ssthresh = max(transport->cwnd/2, 529 transport->ssthresh = max(transport->cwnd/2,
552 4*transport->asoc->pathmtu); 530 4*asoc->pathmtu);
553 transport->cwnd = transport->ssthresh; 531 transport->cwnd = transport->ssthresh;
554 transport->last_time_ecne_reduced = jiffies; 532 transport->last_time_ecne_reduced = jiffies;
555 } 533 }
@@ -565,7 +543,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
565 * interval. 543 * interval.
566 */ 544 */
567 transport->cwnd = max(transport->cwnd/2, 545 transport->cwnd = max(transport->cwnd/2,
568 4*transport->asoc->pathmtu); 546 4*asoc->pathmtu);
569 break; 547 break;
570 } 548 }
571 549
@@ -650,7 +628,6 @@ void sctp_transport_reset(struct sctp_transport *t)
650 t->error_count = 0; 628 t->error_count = 0;
651 t->rto_pending = 0; 629 t->rto_pending = 0;
652 t->hb_sent = 0; 630 t->hb_sent = 0;
653 t->fast_recovery = 0;
654 631
655 /* Initialize the state information for SFR-CACC */ 632 /* Initialize the state information for SFR-CACC */
656 t->cacc.changeover_active = 0; 633 t->cacc.changeover_active = 0;
diff --git a/net/socket.c b/net/socket.c
index 35bc198bbf68..dae8c6b84a09 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
252 ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); 252 ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
253 if (!ei) 253 if (!ei)
254 return NULL; 254 return NULL;
255 init_waitqueue_head(&ei->socket.wait); 255 ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
256 if (!ei->socket.wq) {
257 kmem_cache_free(sock_inode_cachep, ei);
258 return NULL;
259 }
260 init_waitqueue_head(&ei->socket.wq->wait);
261 ei->socket.wq->fasync_list = NULL;
256 262
257 ei->socket.fasync_list = NULL;
258 ei->socket.state = SS_UNCONNECTED; 263 ei->socket.state = SS_UNCONNECTED;
259 ei->socket.flags = 0; 264 ei->socket.flags = 0;
260 ei->socket.ops = NULL; 265 ei->socket.ops = NULL;
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
264 return &ei->vfs_inode; 269 return &ei->vfs_inode;
265} 270}
266 271
272
273static void wq_free_rcu(struct rcu_head *head)
274{
275 struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
276
277 kfree(wq);
278}
279
267static void sock_destroy_inode(struct inode *inode) 280static void sock_destroy_inode(struct inode *inode)
268{ 281{
269 kmem_cache_free(sock_inode_cachep, 282 struct socket_alloc *ei;
270 container_of(inode, struct socket_alloc, vfs_inode)); 283
284 ei = container_of(inode, struct socket_alloc, vfs_inode);
285 call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
286 kmem_cache_free(sock_inode_cachep, ei);
271} 287}
272 288
273static void init_once(void *foo) 289static void init_once(void *foo)
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock)
513 module_put(owner); 529 module_put(owner);
514 } 530 }
515 531
516 if (sock->fasync_list) 532 if (sock->wq->fasync_list)
517 printk(KERN_ERR "sock_release: fasync list not empty!\n"); 533 printk(KERN_ERR "sock_release: fasync list not empty!\n");
518 534
519 percpu_sub(sockets_in_use, 1); 535 percpu_sub(sockets_in_use, 1);
@@ -655,13 +671,13 @@ inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff
655 sizeof(__u32), &skb->dropcount); 671 sizeof(__u32), &skb->dropcount);
656} 672}
657 673
658void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 674void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
659 struct sk_buff *skb) 675 struct sk_buff *skb)
660{ 676{
661 sock_recv_timestamp(msg, sk, skb); 677 sock_recv_timestamp(msg, sk, skb);
662 sock_recv_drops(msg, sk, skb); 678 sock_recv_drops(msg, sk, skb);
663} 679}
664EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops); 680EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
665 681
666static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, 682static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
667 struct msghdr *msg, size_t size, int flags) 683 struct msghdr *msg, size_t size, int flags)
@@ -1067,87 +1083,44 @@ static int sock_close(struct inode *inode, struct file *filp)
1067 * 1. fasync_list is modified only under process context socket lock 1083 * 1. fasync_list is modified only under process context socket lock
1068 * i.e. under semaphore. 1084 * i.e. under semaphore.
1069 * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) 1085 * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
1070 * or under socket lock. 1086 * or under socket lock
1071 * 3. fasync_list can be used from softirq context, so that
1072 * modification under socket lock have to be enhanced with
1073 * write_lock_bh(&sk->sk_callback_lock).
1074 * --ANK (990710)
1075 */ 1087 */
1076 1088
1077static int sock_fasync(int fd, struct file *filp, int on) 1089static int sock_fasync(int fd, struct file *filp, int on)
1078{ 1090{
1079 struct fasync_struct *fa, *fna = NULL, **prev; 1091 struct socket *sock = filp->private_data;
1080 struct socket *sock; 1092 struct sock *sk = sock->sk;
1081 struct sock *sk;
1082
1083 if (on) {
1084 fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
1085 if (fna == NULL)
1086 return -ENOMEM;
1087 }
1088
1089 sock = filp->private_data;
1090 1093
1091 sk = sock->sk; 1094 if (sk == NULL)
1092 if (sk == NULL) {
1093 kfree(fna);
1094 return -EINVAL; 1095 return -EINVAL;
1095 }
1096 1096
1097 lock_sock(sk); 1097 lock_sock(sk);
1098 1098
1099 spin_lock(&filp->f_lock); 1099 fasync_helper(fd, filp, on, &sock->wq->fasync_list);
1100 if (on)
1101 filp->f_flags |= FASYNC;
1102 else
1103 filp->f_flags &= ~FASYNC;
1104 spin_unlock(&filp->f_lock);
1105
1106 prev = &(sock->fasync_list);
1107
1108 for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
1109 if (fa->fa_file == filp)
1110 break;
1111
1112 if (on) {
1113 if (fa != NULL) {
1114 write_lock_bh(&sk->sk_callback_lock);
1115 fa->fa_fd = fd;
1116 write_unlock_bh(&sk->sk_callback_lock);
1117 1100
1118 kfree(fna); 1101 if (!sock->wq->fasync_list)
1119 goto out; 1102 sock_reset_flag(sk, SOCK_FASYNC);
1120 } 1103 else
1121 fna->fa_file = filp;
1122 fna->fa_fd = fd;
1123 fna->magic = FASYNC_MAGIC;
1124 fna->fa_next = sock->fasync_list;
1125 write_lock_bh(&sk->sk_callback_lock);
1126 sock->fasync_list = fna;
1127 sock_set_flag(sk, SOCK_FASYNC); 1104 sock_set_flag(sk, SOCK_FASYNC);
1128 write_unlock_bh(&sk->sk_callback_lock);
1129 } else {
1130 if (fa != NULL) {
1131 write_lock_bh(&sk->sk_callback_lock);
1132 *prev = fa->fa_next;
1133 if (!sock->fasync_list)
1134 sock_reset_flag(sk, SOCK_FASYNC);
1135 write_unlock_bh(&sk->sk_callback_lock);
1136 kfree(fa);
1137 }
1138 }
1139 1105
1140out: 1106 release_sock(sk);
1141 release_sock(sock->sk);
1142 return 0; 1107 return 0;
1143} 1108}
1144 1109
1145/* This function may be called only under socket lock or callback_lock */ 1110/* This function may be called only under socket lock or callback_lock or rcu_lock */
1146 1111
1147int sock_wake_async(struct socket *sock, int how, int band) 1112int sock_wake_async(struct socket *sock, int how, int band)
1148{ 1113{
1149 if (!sock || !sock->fasync_list) 1114 struct socket_wq *wq;
1115
1116 if (!sock)
1150 return -1; 1117 return -1;
1118 rcu_read_lock();
1119 wq = rcu_dereference(sock->wq);
1120 if (!wq || !wq->fasync_list) {
1121 rcu_read_unlock();
1122 return -1;
1123 }
1151 switch (how) { 1124 switch (how) {
1152 case SOCK_WAKE_WAITD: 1125 case SOCK_WAKE_WAITD:
1153 if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 1126 if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1159,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band)
1159 /* fall through */ 1132 /* fall through */
1160 case SOCK_WAKE_IO: 1133 case SOCK_WAKE_IO:
1161call_kill: 1134call_kill:
1162 __kill_fasync(sock->fasync_list, SIGIO, band); 1135 kill_fasync(&wq->fasync_list, SIGIO, band);
1163 break; 1136 break;
1164 case SOCK_WAKE_URG: 1137 case SOCK_WAKE_URG:
1165 __kill_fasync(sock->fasync_list, SIGURG, band); 1138 kill_fasync(&wq->fasync_list, SIGURG, band);
1166 } 1139 }
1140 rcu_read_unlock();
1167 return 0; 1141 return 0;
1168} 1142}
1169 1143
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a29f259204e6..ce0d5b35c2ac 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
419 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 419 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
420 svc_xprt_enqueue(&svsk->sk_xprt); 420 svc_xprt_enqueue(&svsk->sk_xprt);
421 } 421 }
422 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 422 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
423 wake_up_interruptible(sk->sk_sleep); 423 wake_up_interruptible(sk_sleep(sk));
424} 424}
425 425
426/* 426/*
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk)
436 svc_xprt_enqueue(&svsk->sk_xprt); 436 svc_xprt_enqueue(&svsk->sk_xprt);
437 } 437 }
438 438
439 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { 439 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
440 dprintk("RPC svc_write_space: someone sleeping on %p\n", 440 dprintk("RPC svc_write_space: someone sleeping on %p\n",
441 svsk); 441 svsk);
442 wake_up_interruptible(sk->sk_sleep); 442 wake_up_interruptible(sk_sleep(sk));
443 } 443 }
444} 444}
445 445
@@ -757,8 +757,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
757 printk("svc: socket %p: no user data\n", sk); 757 printk("svc: socket %p: no user data\n", sk);
758 } 758 }
759 759
760 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 760 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
761 wake_up_interruptible_all(sk->sk_sleep); 761 wake_up_interruptible_all(sk_sleep(sk));
762} 762}
763 763
764/* 764/*
@@ -777,8 +777,8 @@ static void svc_tcp_state_change(struct sock *sk)
777 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 777 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
778 svc_xprt_enqueue(&svsk->sk_xprt); 778 svc_xprt_enqueue(&svsk->sk_xprt);
779 } 779 }
780 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 780 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
781 wake_up_interruptible_all(sk->sk_sleep); 781 wake_up_interruptible_all(sk_sleep(sk));
782} 782}
783 783
784static void svc_tcp_data_ready(struct sock *sk, int count) 784static void svc_tcp_data_ready(struct sock *sk, int count)
@@ -791,8 +791,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
791 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 791 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
792 svc_xprt_enqueue(&svsk->sk_xprt); 792 svc_xprt_enqueue(&svsk->sk_xprt);
793 } 793 }
794 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 794 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
795 wake_up_interruptible(sk->sk_sleep); 795 wake_up_interruptible(sk_sleep(sk));
796} 796}
797 797
798/* 798/*
@@ -1494,8 +1494,8 @@ static void svc_sock_detach(struct svc_xprt *xprt)
1494 sk->sk_data_ready = svsk->sk_odata; 1494 sk->sk_data_ready = svsk->sk_odata;
1495 sk->sk_write_space = svsk->sk_owspace; 1495 sk->sk_write_space = svsk->sk_owspace;
1496 1496
1497 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 1497 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
1498 wake_up_interruptible(sk->sk_sleep); 1498 wake_up_interruptible(sk_sleep(sk));
1499} 1499}
1500 1500
1501/* 1501/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 42f09ade0044..699ade68aac1 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -974,7 +974,7 @@ void xprt_reserve(struct rpc_task *task)
974 974
975static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 975static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
976{ 976{
977 return xprt->xid++; 977 return (__force __be32)xprt->xid++;
978} 978}
979 979
980static inline void xprt_init_xid(struct rpc_xprt *xprt) 980static inline void xprt_init_xid(struct rpc_xprt *xprt)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index fd90eb89842b..edea15a54e51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -679,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
679 int ret; 679 int ret;
680 680
681 dprintk("svcrdma: Creating RDMA socket\n"); 681 dprintk("svcrdma: Creating RDMA socket\n");
682 682 if (sa->sa_family != AF_INET) {
683 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
684 return ERR_PTR(-EAFNOSUPPORT);
685 }
683 cma_xprt = rdma_create_xprt(serv, 1); 686 cma_xprt = rdma_create_xprt(serv, 1);
684 if (!cma_xprt) 687 if (!cma_xprt)
685 return ERR_PTR(-ENOMEM); 688 return ERR_PTR(-ENOMEM);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cfb20b80b3a1..66e889ba48fd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock,
446 struct sock *sk = sock->sk; 446 struct sock *sk = sock->sk;
447 u32 mask; 447 u32 mask;
448 448
449 poll_wait(file, sk->sk_sleep, wait); 449 poll_wait(file, sk_sleep(sk), wait);
450 450
451 if (!skb_queue_empty(&sk->sk_receive_queue) || 451 if (!skb_queue_empty(&sk->sk_receive_queue) ||
452 (sock->state == SS_UNCONNECTED) || 452 (sock->state == SS_UNCONNECTED) ||
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
591 break; 591 break;
592 } 592 }
593 release_sock(sk); 593 release_sock(sk);
594 res = wait_event_interruptible(*sk->sk_sleep, 594 res = wait_event_interruptible(*sk_sleep(sk),
595 !tport->congested); 595 !tport->congested);
596 lock_sock(sk); 596 lock_sock(sk);
597 if (res) 597 if (res)
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
650 break; 650 break;
651 } 651 }
652 release_sock(sk); 652 release_sock(sk);
653 res = wait_event_interruptible(*sk->sk_sleep, 653 res = wait_event_interruptible(*sk_sleep(sk),
654 (!tport->congested || !tport->connected)); 654 (!tport->congested || !tport->connected));
655 lock_sock(sk); 655 lock_sock(sk);
656 if (res) 656 if (res)
@@ -931,7 +931,7 @@ restart:
931 goto exit; 931 goto exit;
932 } 932 }
933 release_sock(sk); 933 release_sock(sk);
934 res = wait_event_interruptible(*sk->sk_sleep, 934 res = wait_event_interruptible(*sk_sleep(sk),
935 (!skb_queue_empty(&sk->sk_receive_queue) || 935 (!skb_queue_empty(&sk->sk_receive_queue) ||
936 (sock->state == SS_DISCONNECTING))); 936 (sock->state == SS_DISCONNECTING)));
937 lock_sock(sk); 937 lock_sock(sk);
@@ -1064,7 +1064,7 @@ restart:
1064 goto exit; 1064 goto exit;
1065 } 1065 }
1066 release_sock(sk); 1066 release_sock(sk);
1067 res = wait_event_interruptible(*sk->sk_sleep, 1067 res = wait_event_interruptible(*sk_sleep(sk),
1068 (!skb_queue_empty(&sk->sk_receive_queue) || 1068 (!skb_queue_empty(&sk->sk_receive_queue) ||
1069 (sock->state == SS_DISCONNECTING))); 1069 (sock->state == SS_DISCONNECTING)));
1070 lock_sock(sk); 1070 lock_sock(sk);
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1271 tipc_disconnect_port(tipc_sk_port(sk)); 1271 tipc_disconnect_port(tipc_sk_port(sk));
1272 } 1272 }
1273 1273
1274 if (waitqueue_active(sk->sk_sleep)) 1274 if (waitqueue_active(sk_sleep(sk)))
1275 wake_up_interruptible(sk->sk_sleep); 1275 wake_up_interruptible(sk_sleep(sk));
1276 return TIPC_OK; 1276 return TIPC_OK;
1277} 1277}
1278 1278
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport)
1343{ 1343{
1344 struct sock *sk = (struct sock *)tport->usr_handle; 1344 struct sock *sk = (struct sock *)tport->usr_handle;
1345 1345
1346 if (waitqueue_active(sk->sk_sleep)) 1346 if (waitqueue_active(sk_sleep(sk)))
1347 wake_up_interruptible(sk->sk_sleep); 1347 wake_up_interruptible(sk_sleep(sk));
1348} 1348}
1349 1349
1350/** 1350/**
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1427 1427
1428 release_sock(sk); 1428 release_sock(sk);
1429 res = wait_event_interruptible_timeout(*sk->sk_sleep, 1429 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1430 (!skb_queue_empty(&sk->sk_receive_queue) || 1430 (!skb_queue_empty(&sk->sk_receive_queue) ||
1431 (sock->state != SS_CONNECTING)), 1431 (sock->state != SS_CONNECTING)),
1432 sk->sk_rcvtimeo); 1432 sk->sk_rcvtimeo);
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1521 goto exit; 1521 goto exit;
1522 } 1522 }
1523 release_sock(sk); 1523 release_sock(sk);
1524 res = wait_event_interruptible(*sk->sk_sleep, 1524 res = wait_event_interruptible(*sk_sleep(sk),
1525 (!skb_queue_empty(&sk->sk_receive_queue))); 1525 (!skb_queue_empty(&sk->sk_receive_queue)));
1526 lock_sock(sk); 1526 lock_sock(sk);
1527 if (res) 1527 if (res)
@@ -1632,8 +1632,8 @@ restart:
1632 /* Discard any unreceived messages; wake up sleeping tasks */ 1632 /* Discard any unreceived messages; wake up sleeping tasks */
1633 1633
1634 discard_rx_queue(sk); 1634 discard_rx_queue(sk);
1635 if (waitqueue_active(sk->sk_sleep)) 1635 if (waitqueue_active(sk_sleep(sk)))
1636 wake_up_interruptible(sk->sk_sleep); 1636 wake_up_interruptible(sk_sleep(sk));
1637 res = 0; 1637 res = 0;
1638 break; 1638 break;
1639 1639
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3d9122e78f41..fef2cc5e9d2b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk)
313 313
314static void unix_write_space(struct sock *sk) 314static void unix_write_space(struct sock *sk)
315{ 315{
316 read_lock(&sk->sk_callback_lock); 316 struct socket_wq *wq;
317
318 rcu_read_lock();
317 if (unix_writable(sk)) { 319 if (unix_writable(sk)) {
318 if (sk_has_sleeper(sk)) 320 wq = rcu_dereference(sk->sk_wq);
319 wake_up_interruptible_sync(sk->sk_sleep); 321 if (wq_has_sleeper(wq))
322 wake_up_interruptible_sync(&wq->wait);
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 323 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
321 } 324 }
322 read_unlock(&sk->sk_callback_lock); 325 rcu_read_unlock();
323} 326}
324 327
325/* When dgram socket disconnects (or changes its peer), we clear its receive 328/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
406 skpair->sk_err = ECONNRESET; 409 skpair->sk_err = ECONNRESET;
407 unix_state_unlock(skpair); 410 unix_state_unlock(skpair);
408 skpair->sk_state_change(skpair); 411 skpair->sk_state_change(skpair);
409 read_lock(&skpair->sk_callback_lock);
410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); 412 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 read_unlock(&skpair->sk_callback_lock);
412 } 413 }
413 sock_put(skpair); /* It may now die */ 414 sock_put(skpair); /* It may now die */
414 unix_peer(sk) = NULL; 415 unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@ restart:
1142 newsk->sk_peercred.pid = task_tgid_vnr(current); 1143 newsk->sk_peercred.pid = task_tgid_vnr(current);
1143 current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); 1144 current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
1144 newu = unix_sk(newsk); 1145 newu = unix_sk(newsk);
1145 newsk->sk_sleep = &newu->peer_wait; 1146 newsk->sk_wq = &newu->peer_wq;
1146 otheru = unix_sk(other); 1147 otheru = unix_sk(other);
1147 1148
1148 /* copy address information from listening to new sock*/ 1149 /* copy address information from listening to new sock*/
@@ -1736,7 +1737,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
1736 unix_state_lock(sk); 1737 unix_state_lock(sk);
1737 1738
1738 for (;;) { 1739 for (;;) {
1739 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1740 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1740 1741
1741 if (!skb_queue_empty(&sk->sk_receive_queue) || 1742 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1742 sk->sk_err || 1743 sk->sk_err ||
@@ -1752,7 +1753,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1753 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 } 1754 }
1754 1755
1755 finish_wait(sk->sk_sleep, &wait); 1756 finish_wait(sk_sleep(sk), &wait);
1756 unix_state_unlock(sk); 1757 unix_state_unlock(sk);
1757 return timeo; 1758 return timeo;
1758} 1759}
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode)
1931 other->sk_shutdown |= peer_mode; 1932 other->sk_shutdown |= peer_mode;
1932 unix_state_unlock(other); 1933 unix_state_unlock(other);
1933 other->sk_state_change(other); 1934 other->sk_state_change(other);
1934 read_lock(&other->sk_callback_lock);
1935 if (peer_mode == SHUTDOWN_MASK) 1935 if (peer_mode == SHUTDOWN_MASK)
1936 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); 1936 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1937 else if (peer_mode & RCV_SHUTDOWN) 1937 else if (peer_mode & RCV_SHUTDOWN)
1938 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); 1938 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1939 read_unlock(&other->sk_callback_lock);
1940 } 1939 }
1941 if (other) 1940 if (other)
1942 sock_put(other); 1941 sock_put(other);
@@ -1991,7 +1990,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
1991 struct sock *sk = sock->sk; 1990 struct sock *sk = sock->sk;
1992 unsigned int mask; 1991 unsigned int mask;
1993 1992
1994 sock_poll_wait(file, sk->sk_sleep, wait); 1993 sock_poll_wait(file, sk_sleep(sk), wait);
1995 mask = 0; 1994 mask = 0;
1996 1995
1997 /* exceptional events? */ 1996 /* exceptional events? */
@@ -2028,7 +2027,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2028 struct sock *sk = sock->sk, *other; 2027 struct sock *sk = sock->sk, *other;
2029 unsigned int mask, writable; 2028 unsigned int mask, writable;
2030 2029
2031 sock_poll_wait(file, sk->sk_sleep, wait); 2030 sock_poll_wait(file, sk_sleep(sk), wait);
2032 mask = 0; 2031 mask = 0;
2033 2032
2034 /* exceptional events? */ 2033 /* exceptional events? */
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3768da..c8df6fda0b1f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp)
153 } 153 }
154} 154}
155 155
156static inline struct sk_buff *sock_queue_head(struct sock *sk)
157{
158 return (struct sk_buff *)&sk->sk_receive_queue;
159}
160
161#define receive_queue_for_each_skb(sk, next, skb) \
162 for (skb = sock_queue_head(sk)->next, next = skb->next; \
163 skb != sock_queue_head(sk); skb = next, next = skb->next)
164
165static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), 156static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
166 struct sk_buff_head *hitlist) 157 struct sk_buff_head *hitlist)
167{ 158{
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
169 struct sk_buff *next; 160 struct sk_buff *next;
170 161
171 spin_lock(&x->sk_receive_queue.lock); 162 spin_lock(&x->sk_receive_queue.lock);
172 receive_queue_for_each_skb(x, next, skb) { 163 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
173 /* 164 /*
174 * Do we have file descriptors ? 165 * Do we have file descriptors ?
175 */ 166 */
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
225 * and perform a scan on them as well. 216 * and perform a scan on them as well.
226 */ 217 */
227 spin_lock(&x->sk_receive_queue.lock); 218 spin_lock(&x->sk_receive_queue.lock);
228 receive_queue_for_each_skb(x, next, skb) { 219 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
229 u = unix_sk(skb->sk); 220 u = unix_sk(skb->sk);
230 221
231 /* 222 /*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ac70c101523..37d0e0ab4432 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -705,7 +705,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
705 wdev->ps = true; 705 wdev->ps = true;
706 else 706 else
707 wdev->ps = false; 707 wdev->ps = false;
708 wdev->ps_timeout = 100; 708 /* allow mac80211 to determine the timeout */
709 wdev->ps_timeout = -1;
709 if (rdev->ops->set_power_mgmt) 710 if (rdev->ops->set_power_mgmt)
710 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 711 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
711 wdev->ps, 712 wdev->ps,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 356a84a5daee..01da83ddcff7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -152,6 +152,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
152 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, 152 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
153 [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, 153 [NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
154 [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, 154 [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
155 [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
155}; 156};
156 157
157/* policy for the attributes */ 158/* policy for the attributes */
@@ -2442,6 +2443,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
2442 params.use_cts_prot = -1; 2443 params.use_cts_prot = -1;
2443 params.use_short_preamble = -1; 2444 params.use_short_preamble = -1;
2444 params.use_short_slot_time = -1; 2445 params.use_short_slot_time = -1;
2446 params.ap_isolate = -1;
2445 2447
2446 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) 2448 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
2447 params.use_cts_prot = 2449 params.use_cts_prot =
@@ -2458,6 +2460,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
2458 params.basic_rates_len = 2460 params.basic_rates_len =
2459 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); 2461 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
2460 } 2462 }
2463 if (info->attrs[NL80211_ATTR_AP_ISOLATE])
2464 params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
2461 2465
2462 rtnl_lock(); 2466 rtnl_lock();
2463 2467
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index c2735775ec19..8ddf5ae0dd03 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -518,12 +518,16 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
518 ev->type = EVENT_CONNECT_RESULT; 518 ev->type = EVENT_CONNECT_RESULT;
519 if (bssid) 519 if (bssid)
520 memcpy(ev->cr.bssid, bssid, ETH_ALEN); 520 memcpy(ev->cr.bssid, bssid, ETH_ALEN);
521 ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); 521 if (req_ie_len) {
522 ev->cr.req_ie_len = req_ie_len; 522 ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
523 memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); 523 ev->cr.req_ie_len = req_ie_len;
524 ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; 524 memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
525 ev->cr.resp_ie_len = resp_ie_len; 525 }
526 memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); 526 if (resp_ie_len) {
527 ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
528 ev->cr.resp_ie_len = resp_ie_len;
529 memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
530 }
527 ev->cr.status = status; 531 ev->cr.status = status;
528 532
529 spin_lock_irqsave(&wdev->event_lock, flags); 533 spin_lock_irqsave(&wdev->event_lock, flags);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index cbddd0cb83f1..296e65e01064 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -402,6 +402,7 @@ static void __x25_destroy_socket(struct sock *sk)
402 /* 402 /*
403 * Queue the unaccepted socket for death 403 * Queue the unaccepted socket for death
404 */ 404 */
405 skb->sk->sk_state = TCP_LISTEN;
405 sock_set_flag(skb->sk, SOCK_DEAD); 406 sock_set_flag(skb->sk, SOCK_DEAD);
406 x25_start_heartbeat(skb->sk); 407 x25_start_heartbeat(skb->sk);
407 x25_sk(skb->sk)->state = X25_STATE_0; 408 x25_sk(skb->sk)->state = X25_STATE_0;
@@ -718,7 +719,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
718 DECLARE_WAITQUEUE(wait, current); 719 DECLARE_WAITQUEUE(wait, current);
719 int rc; 720 int rc;
720 721
721 add_wait_queue_exclusive(sk->sk_sleep, &wait); 722 add_wait_queue_exclusive(sk_sleep(sk), &wait);
722 for (;;) { 723 for (;;) {
723 __set_current_state(TASK_INTERRUPTIBLE); 724 __set_current_state(TASK_INTERRUPTIBLE);
724 rc = -ERESTARTSYS; 725 rc = -ERESTARTSYS;
@@ -738,7 +739,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
738 break; 739 break;
739 } 740 }
740 __set_current_state(TASK_RUNNING); 741 __set_current_state(TASK_RUNNING);
741 remove_wait_queue(sk->sk_sleep, &wait); 742 remove_wait_queue(sk_sleep(sk), &wait);
742 return rc; 743 return rc;
743} 744}
744 745
@@ -838,7 +839,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
838 DECLARE_WAITQUEUE(wait, current); 839 DECLARE_WAITQUEUE(wait, current);
839 int rc = 0; 840 int rc = 0;
840 841
841 add_wait_queue_exclusive(sk->sk_sleep, &wait); 842 add_wait_queue_exclusive(sk_sleep(sk), &wait);
842 for (;;) { 843 for (;;) {
843 __set_current_state(TASK_INTERRUPTIBLE); 844 __set_current_state(TASK_INTERRUPTIBLE);
844 if (sk->sk_shutdown & RCV_SHUTDOWN) 845 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,7 +859,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
858 break; 859 break;
859 } 860 }
860 __set_current_state(TASK_RUNNING); 861 __set_current_state(TASK_RUNNING);
861 remove_wait_queue(sk->sk_sleep, &wait); 862 remove_wait_queue(sk_sleep(sk), &wait);
862 return rc; 863 return rc;
863} 864}
864 865
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index b9ef682230a0..9005f6daeab5 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -24,6 +24,7 @@
24#include <net/sock.h> 24#include <net/sock.h>
25#include <linux/if_arp.h> 25#include <linux/if_arp.h>
26#include <net/x25.h> 26#include <net/x25.h>
27#include <net/x25device.h>
27 28
28static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) 29static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
29{ 30{
@@ -115,19 +116,22 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
115 } 116 }
116 117
117 switch (skb->data[0]) { 118 switch (skb->data[0]) {
118 case 0x00: 119
119 skb_pull(skb, 1); 120 case X25_IFACE_DATA:
120 if (x25_receive_data(skb, nb)) { 121 skb_pull(skb, 1);
121 x25_neigh_put(nb); 122 if (x25_receive_data(skb, nb)) {
122 goto out; 123 x25_neigh_put(nb);
123 } 124 goto out;
124 break; 125 }
125 case 0x01: 126 break;
126 x25_link_established(nb); 127
127 break; 128 case X25_IFACE_CONNECT:
128 case 0x02: 129 x25_link_established(nb);
129 x25_link_terminated(nb); 130 break;
130 break; 131
132 case X25_IFACE_DISCONNECT:
133 x25_link_terminated(nb);
134 break;
131 } 135 }
132 x25_neigh_put(nb); 136 x25_neigh_put(nb);
133drop: 137drop:
@@ -148,7 +152,7 @@ void x25_establish_link(struct x25_neigh *nb)
148 return; 152 return;
149 } 153 }
150 ptr = skb_put(skb, 1); 154 ptr = skb_put(skb, 1);
151 *ptr = 0x01; 155 *ptr = X25_IFACE_CONNECT;
152 break; 156 break;
153 157
154#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) 158#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
@@ -184,7 +188,7 @@ void x25_terminate_link(struct x25_neigh *nb)
184 } 188 }
185 189
186 ptr = skb_put(skb, 1); 190 ptr = skb_put(skb, 1);
187 *ptr = 0x02; 191 *ptr = X25_IFACE_DISCONNECT;
188 192
189 skb->protocol = htons(ETH_P_X25); 193 skb->protocol = htons(ETH_P_X25);
190 skb->dev = nb->dev; 194 skb->dev = nb->dev;
@@ -200,7 +204,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
200 switch (nb->dev->type) { 204 switch (nb->dev->type) {
201 case ARPHRD_X25: 205 case ARPHRD_X25:
202 dptr = skb_push(skb, 1); 206 dptr = skb_push(skb, 1);
203 *dptr = 0x00; 207 *dptr = X25_IFACE_DATA;
204 break; 208 break;
205 209
206#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) 210#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index e5195c99f71e..1396572d2ade 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
16 16
17static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) 17static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
18{ 18{
19 return ntohl(daddr->a4 + saddr->a4); 19 u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
20 return ntohl((__force __be32)sum);
20} 21}
21 22
22static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) 23static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7430ac26ec49..31f4ba43b48f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1732,7 +1732,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1732 struct dst_entry *dst, *dst_orig = *dst_p, *route; 1732 struct dst_entry *dst, *dst_orig = *dst_p, *route;
1733 u16 family = dst_orig->ops->family; 1733 u16 family = dst_orig->ops->family;
1734 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1734 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1735 int i, err, num_pols, num_xfrms, drop_pols = 0; 1735 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
1736 1736
1737restart: 1737restart:
1738 dst = NULL; 1738 dst = NULL;
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 8da6a8428086..cd4f734e2749 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
82void avtab_cache_init(void); 82void avtab_cache_init(void);
83void avtab_cache_destroy(void); 83void avtab_cache_destroy(void);
84 84
85#define MAX_AVTAB_HASH_BITS 13 85#define MAX_AVTAB_HASH_BITS 11
86#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) 86#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
87#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) 87#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
88#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS 88#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 656e474dca47..91acc9a243ec 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci)
863 struct snd_ac97 *ac97; 863 struct snd_ac97 *ac97;
864 int ret; 864 int ret;
865 865
866 writel(0, aaci->base + AC97_POWERDOWN);
867 /* 866 /*
868 * Assert AACIRESET for 2us 867 * Assert AACIRESET for 2us
869 */ 868 */
@@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
1047 1046
1048 writel(0x1fff, aaci->base + AACI_INTCLR); 1047 writel(0x1fff, aaci->base + AACI_INTCLR);
1049 writel(aaci->maincr, aaci->base + AACI_MAINCR); 1048 writel(aaci->maincr, aaci->base + AACI_MAINCR);
1050 1049 /*
1050 * Fix: ac97 read back fail errors by reading
1051 * from any arbitrary aaci register.
1052 */
1053 readl(aaci->base + AACI_CSCH1);
1051 ret = aaci_probe_ac97(aaci); 1054 ret = aaci_probe_ac97(aaci);
1052 if (ret) 1055 if (ret)
1053 goto out; 1056 goto out;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f8fd586ae024..f669442b7c82 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2272,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2272 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), 2272 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
2273 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), 2273 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
2274 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), 2274 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
2275 SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
2275 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), 2276 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
2276 {} 2277 {}
2277}; 2278};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c7730dbb9ddb..aad1627f56f1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -230,6 +230,7 @@ enum {
230 ALC888_ACER_ASPIRE_7730G, 230 ALC888_ACER_ASPIRE_7730G,
231 ALC883_MEDION, 231 ALC883_MEDION,
232 ALC883_MEDION_MD2, 232 ALC883_MEDION_MD2,
233 ALC883_MEDION_WIM2160,
233 ALC883_LAPTOP_EAPD, 234 ALC883_LAPTOP_EAPD,
234 ALC883_LENOVO_101E_2ch, 235 ALC883_LENOVO_101E_2ch,
235 ALC883_LENOVO_NB0763, 236 ALC883_LENOVO_NB0763,
@@ -1389,22 +1390,31 @@ struct alc_fixup {
1389 1390
1390static void alc_pick_fixup(struct hda_codec *codec, 1391static void alc_pick_fixup(struct hda_codec *codec,
1391 const struct snd_pci_quirk *quirk, 1392 const struct snd_pci_quirk *quirk,
1392 const struct alc_fixup *fix) 1393 const struct alc_fixup *fix,
1394 int pre_init)
1393{ 1395{
1394 const struct alc_pincfg *cfg; 1396 const struct alc_pincfg *cfg;
1395 1397
1396 quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); 1398 quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
1397 if (!quirk) 1399 if (!quirk)
1398 return; 1400 return;
1399
1400 fix += quirk->value; 1401 fix += quirk->value;
1401 cfg = fix->pins; 1402 cfg = fix->pins;
1402 if (cfg) { 1403 if (pre_init && cfg) {
1404#ifdef CONFIG_SND_DEBUG_VERBOSE
1405 snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
1406 codec->chip_name, quirk->name);
1407#endif
1403 for (; cfg->nid; cfg++) 1408 for (; cfg->nid; cfg++)
1404 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); 1409 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1405 } 1410 }
1406 if (fix->verbs) 1411 if (!pre_init && fix->verbs) {
1412#ifdef CONFIG_SND_DEBUG_VERBOSE
1413 snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
1414 codec->chip_name, quirk->name);
1415#endif
1407 add_verb(codec->spec, fix->verbs); 1416 add_verb(codec->spec, fix->verbs);
1417 }
1408} 1418}
1409 1419
1410static int alc_read_coef_idx(struct hda_codec *codec, 1420static int alc_read_coef_idx(struct hda_codec *codec,
@@ -4808,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec)
4808 } 4818 }
4809} 4819}
4810 4820
4821static void alc880_auto_init_input_src(struct hda_codec *codec)
4822{
4823 struct alc_spec *spec = codec->spec;
4824 int c;
4825
4826 for (c = 0; c < spec->num_adc_nids; c++) {
4827 unsigned int mux_idx;
4828 const struct hda_input_mux *imux;
4829 mux_idx = c >= spec->num_mux_defs ? 0 : c;
4830 imux = &spec->input_mux[mux_idx];
4831 if (!imux->num_items && mux_idx > 0)
4832 imux = &spec->input_mux[0];
4833 if (imux)
4834 snd_hda_codec_write(codec, spec->adc_nids[c], 0,
4835 AC_VERB_SET_CONNECT_SEL,
4836 imux->items[0].index);
4837 }
4838}
4839
4811/* parse the BIOS configuration and set up the alc_spec */ 4840/* parse the BIOS configuration and set up the alc_spec */
4812/* return 1 if successful, 0 if the proper config is not found, 4841/* return 1 if successful, 0 if the proper config is not found,
4813 * or a negative error code 4842 * or a negative error code
@@ -4886,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec)
4886 alc880_auto_init_multi_out(codec); 4915 alc880_auto_init_multi_out(codec);
4887 alc880_auto_init_extra_out(codec); 4916 alc880_auto_init_extra_out(codec);
4888 alc880_auto_init_analog_input(codec); 4917 alc880_auto_init_analog_input(codec);
4918 alc880_auto_init_input_src(codec);
4889 if (spec->unsol_event) 4919 if (spec->unsol_event)
4890 alc_inithook(codec); 4920 alc_inithook(codec);
4891} 4921}
@@ -6397,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec)
6397 } 6427 }
6398} 6428}
6399 6429
6430#define alc260_auto_init_input_src alc880_auto_init_input_src
6431
6400/* 6432/*
6401 * generic initialization of ADC, input mixers and output mixers 6433 * generic initialization of ADC, input mixers and output mixers
6402 */ 6434 */
@@ -6483,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec)
6483 struct alc_spec *spec = codec->spec; 6515 struct alc_spec *spec = codec->spec;
6484 alc260_auto_init_multi_out(codec); 6516 alc260_auto_init_multi_out(codec);
6485 alc260_auto_init_analog_input(codec); 6517 alc260_auto_init_analog_input(codec);
6518 alc260_auto_init_input_src(codec);
6486 if (spec->unsol_event) 6519 if (spec->unsol_event)
6487 alc_inithook(codec); 6520 alc_inithook(codec);
6488} 6521}
@@ -8455,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
8455 { } /* end */ 8488 { } /* end */
8456}; 8489};
8457 8490
8491static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = {
8492 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8493 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
8494 HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT),
8495 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
8496 HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT),
8497 HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT),
8498 { } /* end */
8499};
8500
8501static struct hda_verb alc883_medion_wim2160_verbs[] = {
8502 /* Unmute front mixer */
8503 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8504 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8505
8506 /* Set speaker pin to front mixer */
8507 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
8508
8509 /* Init headphone pin */
8510 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8511 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8512 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
8513 {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8514
8515 { } /* end */
8516};
8517
8518/* toggle speaker-output according to the hp-jack state */
8519static void alc883_medion_wim2160_setup(struct hda_codec *codec)
8520{
8521 struct alc_spec *spec = codec->spec;
8522
8523 spec->autocfg.hp_pins[0] = 0x1a;
8524 spec->autocfg.speaker_pins[0] = 0x15;
8525}
8526
8458static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { 8527static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = {
8459 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 8528 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8460 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), 8529 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -9164,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = {
9164 [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", 9233 [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g",
9165 [ALC883_MEDION] = "medion", 9234 [ALC883_MEDION] = "medion",
9166 [ALC883_MEDION_MD2] = "medion-md2", 9235 [ALC883_MEDION_MD2] = "medion-md2",
9236 [ALC883_MEDION_WIM2160] = "medion-wim2160",
9167 [ALC883_LAPTOP_EAPD] = "laptop-eapd", 9237 [ALC883_LAPTOP_EAPD] = "laptop-eapd",
9168 [ALC883_LENOVO_101E_2ch] = "lenovo-101e", 9238 [ALC883_LENOVO_101E_2ch] = "lenovo-101e",
9169 [ALC883_LENOVO_NB0763] = "lenovo-nb0763", 9239 [ALC883_LENOVO_NB0763] = "lenovo-nb0763",
@@ -9280,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9280 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), 9350 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
9281 9351
9282 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), 9352 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
9353 SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
9283 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), 9354 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
9284 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), 9355 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
9285 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), 9356 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
@@ -9818,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = {
9818 .setup = alc883_medion_md2_setup, 9889 .setup = alc883_medion_md2_setup,
9819 .init_hook = alc_automute_amp, 9890 .init_hook = alc_automute_amp,
9820 }, 9891 },
9892 [ALC883_MEDION_WIM2160] = {
9893 .mixers = { alc883_medion_wim2160_mixer },
9894 .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
9895 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9896 .dac_nids = alc883_dac_nids,
9897 .dig_out_nid = ALC883_DIGOUT_NID,
9898 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
9899 .adc_nids = alc883_adc_nids,
9900 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9901 .channel_mode = alc883_3ST_2ch_modes,
9902 .input_mux = &alc883_capture_source,
9903 .unsol_event = alc_automute_amp_unsol_event,
9904 .setup = alc883_medion_wim2160_setup,
9905 .init_hook = alc_automute_amp,
9906 },
9821 [ALC883_LAPTOP_EAPD] = { 9907 [ALC883_LAPTOP_EAPD] = {
9822 .mixers = { alc883_base_mixer }, 9908 .mixers = { alc883_base_mixer },
9823 .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, 9909 .init_verbs = { alc883_init_verbs, alc882_eapd_verbs },
@@ -10363,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec)
10363 board_config = ALC882_AUTO; 10449 board_config = ALC882_AUTO;
10364 } 10450 }
10365 10451
10366 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); 10452 if (board_config == ALC882_AUTO)
10453 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1);
10367 10454
10368 if (board_config == ALC882_AUTO) { 10455 if (board_config == ALC882_AUTO) {
10369 /* automatic parse from the BIOS config */ 10456 /* automatic parse from the BIOS config */
@@ -10436,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec)
10436 set_capture_mixer(codec); 10523 set_capture_mixer(codec);
10437 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 10524 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
10438 10525
10526 if (board_config == ALC882_AUTO)
10527 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0);
10528
10439 spec->vmaster_nid = 0x0c; 10529 spec->vmaster_nid = 0x0c;
10440 10530
10441 codec->patch_ops = alc_patch_ops; 10531 codec->patch_ops = alc_patch_ops;
@@ -12816,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
12816 dac = 0x02; 12906 dac = 0x02;
12817 break; 12907 break;
12818 case 0x15: 12908 case 0x15:
12909 case 0x21: /* ALC269vb has this pin, too */
12819 dac = 0x03; 12910 dac = 0x03;
12820 break; 12911 break;
12821 default: 12912 default:
@@ -13735,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec,
13735 } 13826 }
13736} 13827}
13737 13828
13738static void alc269_laptop_dmic_setup(struct hda_codec *codec) 13829static void alc269_laptop_amic_setup(struct hda_codec *codec)
13739{ 13830{
13740 struct alc_spec *spec = codec->spec; 13831 struct alc_spec *spec = codec->spec;
13741 spec->autocfg.hp_pins[0] = 0x15; 13832 spec->autocfg.hp_pins[0] = 0x15;
13742 spec->autocfg.speaker_pins[0] = 0x14; 13833 spec->autocfg.speaker_pins[0] = 0x14;
13743 spec->ext_mic.pin = 0x18; 13834 spec->ext_mic.pin = 0x18;
13744 spec->ext_mic.mux_idx = 0; 13835 spec->ext_mic.mux_idx = 0;
13745 spec->int_mic.pin = 0x12; 13836 spec->int_mic.pin = 0x19;
13746 spec->int_mic.mux_idx = 5; 13837 spec->int_mic.mux_idx = 1;
13747 spec->auto_mic = 1; 13838 spec->auto_mic = 1;
13748} 13839}
13749 13840
13750static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) 13841static void alc269_laptop_dmic_setup(struct hda_codec *codec)
13751{ 13842{
13752 struct alc_spec *spec = codec->spec; 13843 struct alc_spec *spec = codec->spec;
13753 spec->autocfg.hp_pins[0] = 0x15; 13844 spec->autocfg.hp_pins[0] = 0x15;
@@ -13755,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
13755 spec->ext_mic.pin = 0x18; 13846 spec->ext_mic.pin = 0x18;
13756 spec->ext_mic.mux_idx = 0; 13847 spec->ext_mic.mux_idx = 0;
13757 spec->int_mic.pin = 0x12; 13848 spec->int_mic.pin = 0x12;
13758 spec->int_mic.mux_idx = 6; 13849 spec->int_mic.mux_idx = 5;
13759 spec->auto_mic = 1; 13850 spec->auto_mic = 1;
13760} 13851}
13761 13852
13762static void alc269_laptop_amic_setup(struct hda_codec *codec) 13853static void alc269vb_laptop_amic_setup(struct hda_codec *codec)
13763{ 13854{
13764 struct alc_spec *spec = codec->spec; 13855 struct alc_spec *spec = codec->spec;
13765 spec->autocfg.hp_pins[0] = 0x15; 13856 spec->autocfg.hp_pins[0] = 0x21;
13766 spec->autocfg.speaker_pins[0] = 0x14; 13857 spec->autocfg.speaker_pins[0] = 0x14;
13767 spec->ext_mic.pin = 0x18; 13858 spec->ext_mic.pin = 0x18;
13768 spec->ext_mic.mux_idx = 0; 13859 spec->ext_mic.mux_idx = 0;
@@ -13771,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec)
13771 spec->auto_mic = 1; 13862 spec->auto_mic = 1;
13772} 13863}
13773 13864
13865static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
13866{
13867 struct alc_spec *spec = codec->spec;
13868 spec->autocfg.hp_pins[0] = 0x21;
13869 spec->autocfg.speaker_pins[0] = 0x14;
13870 spec->ext_mic.pin = 0x18;
13871 spec->ext_mic.mux_idx = 0;
13872 spec->int_mic.pin = 0x12;
13873 spec->int_mic.mux_idx = 6;
13874 spec->auto_mic = 1;
13875}
13876
13774static void alc269_laptop_inithook(struct hda_codec *codec) 13877static void alc269_laptop_inithook(struct hda_codec *codec)
13775{ 13878{
13776 alc269_speaker_automute(codec); 13879 alc269_speaker_automute(codec);
@@ -13975,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec)
13975 alc_inithook(codec); 14078 alc_inithook(codec);
13976} 14079}
13977 14080
14081enum {
14082 ALC269_FIXUP_SONY_VAIO,
14083};
14084
14085const static struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
14086 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
14087 {}
14088};
14089
14090static const struct alc_fixup alc269_fixups[] = {
14091 [ALC269_FIXUP_SONY_VAIO] = {
14092 .verbs = alc269_sony_vaio_fixup_verbs
14093 },
14094};
14095
14096static struct snd_pci_quirk alc269_fixup_tbl[] = {
14097 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14098 {}
14099};
14100
14101
13978/* 14102/*
13979 * configuration and preset 14103 * configuration and preset
13980 */ 14104 */
@@ -14034,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
14034 ALC269_DMIC), 14158 ALC269_DMIC),
14035 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), 14159 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC),
14036 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), 14160 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC),
14037 SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC), 14161 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO),
14038 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), 14162 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK),
14039 SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), 14163 SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC),
14040 SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), 14164 SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU),
@@ -14108,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = {
14108 .num_channel_mode = ARRAY_SIZE(alc269_modes), 14232 .num_channel_mode = ARRAY_SIZE(alc269_modes),
14109 .channel_mode = alc269_modes, 14233 .channel_mode = alc269_modes,
14110 .unsol_event = alc269_laptop_unsol_event, 14234 .unsol_event = alc269_laptop_unsol_event,
14111 .setup = alc269_laptop_amic_setup, 14235 .setup = alc269vb_laptop_amic_setup,
14112 .init_hook = alc269_laptop_inithook, 14236 .init_hook = alc269_laptop_inithook,
14113 }, 14237 },
14114 [ALC269VB_DMIC] = { 14238 [ALC269VB_DMIC] = {
@@ -14188,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec)
14188 board_config = ALC269_AUTO; 14312 board_config = ALC269_AUTO;
14189 } 14313 }
14190 14314
14315 if (board_config == ALC269_AUTO)
14316 alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1);
14317
14191 if (board_config == ALC269_AUTO) { 14318 if (board_config == ALC269_AUTO) {
14192 /* automatic parse from the BIOS config */ 14319 /* automatic parse from the BIOS config */
14193 err = alc269_parse_auto_config(codec); 14320 err = alc269_parse_auto_config(codec);
@@ -14240,6 +14367,9 @@ static int patch_alc269(struct hda_codec *codec)
14240 set_capture_mixer(codec); 14367 set_capture_mixer(codec);
14241 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); 14368 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
14242 14369
14370 if (board_config == ALC269_AUTO)
14371 alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0);
14372
14243 spec->vmaster_nid = 0x02; 14373 spec->vmaster_nid = 0x02;
14244 14374
14245 codec->patch_ops = alc_patch_ops; 14375 codec->patch_ops = alc_patch_ops;
@@ -15328,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec)
15328 board_config = ALC861_AUTO; 15458 board_config = ALC861_AUTO;
15329 } 15459 }
15330 15460
15331 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups); 15461 if (board_config == ALC861_AUTO)
15462 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1);
15332 15463
15333 if (board_config == ALC861_AUTO) { 15464 if (board_config == ALC861_AUTO) {
15334 /* automatic parse from the BIOS config */ 15465 /* automatic parse from the BIOS config */
@@ -15365,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec)
15365 15496
15366 spec->vmaster_nid = 0x03; 15497 spec->vmaster_nid = 0x03;
15367 15498
15499 if (board_config == ALC861_AUTO)
15500 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0);
15501
15368 codec->patch_ops = alc_patch_ops; 15502 codec->patch_ops = alc_patch_ops;
15369 if (board_config == ALC861_AUTO) { 15503 if (board_config == ALC861_AUTO) {
15370 spec->init_hook = alc861_auto_init; 15504 spec->init_hook = alc861_auto_init;
@@ -16299,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec)
16299 board_config = ALC861VD_AUTO; 16433 board_config = ALC861VD_AUTO;
16300 } 16434 }
16301 16435
16302 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); 16436 if (board_config == ALC861VD_AUTO)
16437 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1);
16303 16438
16304 if (board_config == ALC861VD_AUTO) { 16439 if (board_config == ALC861VD_AUTO) {
16305 /* automatic parse from the BIOS config */ 16440 /* automatic parse from the BIOS config */
@@ -16347,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec)
16347 16482
16348 spec->vmaster_nid = 0x02; 16483 spec->vmaster_nid = 0x02;
16349 16484
16485 if (board_config == ALC861VD_AUTO)
16486 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0);
16487
16350 codec->patch_ops = alc_patch_ops; 16488 codec->patch_ops = alc_patch_ops;
16351 16489
16352 if (board_config == ALC861VD_AUTO) 16490 if (board_config == ALC861VD_AUTO)
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 9ddc37300f6b..73453814e098 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec,
476 knew->name = kstrdup(tmpl->name, GFP_KERNEL); 476 knew->name = kstrdup(tmpl->name, GFP_KERNEL);
477 if (!knew->name) 477 if (!knew->name)
478 return NULL; 478 return NULL;
479 return 0; 479 return knew;
480} 480}
481 481
482static void via_free_kctls(struct hda_codec *codec) 482static void via_free_kctls(struct hda_codec *codec)
@@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = {
1215 }, 1215 },
1216}; 1216};
1217 1217
1218static int via_hp_build(struct via_spec *spec) 1218static int via_hp_build(struct hda_codec *codec)
1219{ 1219{
1220 struct via_spec *spec = codec->spec;
1220 struct snd_kcontrol_new *knew; 1221 struct snd_kcontrol_new *knew;
1221 hda_nid_t nid; 1222 hda_nid_t nid;
1222 1223 int nums;
1223 knew = via_clone_control(spec, &via_hp_mixer[0]); 1224 hda_nid_t conn[HDA_MAX_CONNECTIONS];
1224 if (knew == NULL)
1225 return -ENOMEM;
1226 1225
1227 switch (spec->codec_type) { 1226 switch (spec->codec_type) {
1228 case VT1718S: 1227 case VT1718S:
@@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec)
1239 break; 1238 break;
1240 } 1239 }
1241 1240
1241 nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
1242 if (nums <= 1)
1243 return 0;
1244
1245 knew = via_clone_control(spec, &via_hp_mixer[0]);
1246 if (knew == NULL)
1247 return -ENOMEM;
1248
1242 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; 1249 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
1243 knew->private_value = nid; 1250 knew->private_value = nid;
1244 1251
@@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec)
2561 spec->input_mux = &spec->private_imux[0]; 2568 spec->input_mux = &spec->private_imux[0];
2562 2569
2563 if (spec->hp_mux) 2570 if (spec->hp_mux)
2564 via_hp_build(spec); 2571 via_hp_build(codec);
2565 2572
2566 via_smart51_build(spec); 2573 via_smart51_build(spec);
2567 return 1; 2574 return 1;
@@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec)
3087 spec->input_mux = &spec->private_imux[0]; 3094 spec->input_mux = &spec->private_imux[0];
3088 3095
3089 if (spec->hp_mux) 3096 if (spec->hp_mux)
3090 via_hp_build(spec); 3097 via_hp_build(codec);
3091 3098
3092 via_smart51_build(spec); 3099 via_smart51_build(spec);
3093 return 1; 3100 return 1;
@@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec)
3654 spec->input_mux = &spec->private_imux[0]; 3661 spec->input_mux = &spec->private_imux[0];
3655 3662
3656 if (spec->hp_mux) 3663 if (spec->hp_mux)
3657 via_hp_build(spec); 3664 via_hp_build(codec);
3658 3665
3659 via_smart51_build(spec); 3666 via_smart51_build(spec);
3660 return 1; 3667 return 1;
@@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec)
4140 spec->input_mux = &spec->private_imux[0]; 4147 spec->input_mux = &spec->private_imux[0];
4141 4148
4142 if (spec->hp_mux) 4149 if (spec->hp_mux)
4143 via_hp_build(spec); 4150 via_hp_build(codec);
4144 4151
4145 via_smart51_build(spec); 4152 via_smart51_build(spec);
4146 return 1; 4153 return 1;
@@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
4510 spec->input_mux = &spec->private_imux[0]; 4517 spec->input_mux = &spec->private_imux[0];
4511 4518
4512 if (spec->hp_mux) 4519 if (spec->hp_mux)
4513 via_hp_build(spec); 4520 via_hp_build(codec);
4514 4521
4515 return 1; 4522 return 1;
4516} 4523}
@@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec)
4930 spec->input_mux = &spec->private_imux[0]; 4937 spec->input_mux = &spec->private_imux[0];
4931 4938
4932 if (spec->hp_mux) 4939 if (spec->hp_mux)
4933 via_hp_build(spec); 4940 via_hp_build(codec);
4934 4941
4935 via_smart51_build(spec); 4942 via_smart51_build(spec);
4936 4943
@@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec)
5425 spec->input_mux = &spec->private_imux[0]; 5432 spec->input_mux = &spec->private_imux[0];
5426 5433
5427 if (spec->hp_mux) 5434 if (spec->hp_mux)
5428 via_hp_build(spec); 5435 via_hp_build(codec);
5429 5436
5430 via_smart51_build(spec); 5437 via_smart51_build(spec);
5431 5438
@@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec)
5781 spec->input_mux = &spec->private_imux[0]; 5788 spec->input_mux = &spec->private_imux[0];
5782 5789
5783 if (spec->hp_mux) 5790 if (spec->hp_mux)
5784 via_hp_build(spec); 5791 via_hp_build(codec);
5785 5792
5786 return 1; 5793 return 1;
5787} 5794}
@@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec,
6000 6007
6001 /* Line-Out: PortE */ 6008 /* Line-Out: PortE */
6002 err = via_add_control(spec, VIA_CTL_WIDGET_VOL, 6009 err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
6003 "Master Front Playback Volume", 6010 "Front Playback Volume",
6004 HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); 6011 HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT));
6005 if (err < 0) 6012 if (err < 0)
6006 return err; 6013 return err;
6007 err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, 6014 err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE,
6008 "Master Front Playback Switch", 6015 "Front Playback Switch",
6009 HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); 6016 HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT));
6010 if (err < 0) 6017 if (err < 0)
6011 return err; 6018 return err;
@@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec)
6130 spec->input_mux = &spec->private_imux[0]; 6137 spec->input_mux = &spec->private_imux[0];
6131 6138
6132 if (spec->hp_mux) 6139 if (spec->hp_mux)
6133 via_hp_build(spec); 6140 via_hp_build(codec);
6134 6141
6135 return 1; 6142 return 1;
6136} 6143}
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a34cbcf7904f..002e289d1255 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -23,7 +23,6 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <linux/kernel.h> 26#include <linux/kernel.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/firmware.h> 28#include <linux/firmware.h>
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index 2e79d7136298..2b31ac673ea4 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data)
71 71
72static void snd_imx_dma_err_callback(int channel, void *data, int err) 72static void snd_imx_dma_err_callback(int channel, void *data, int err)
73{ 73{
74 pr_err("DMA error callback called\n"); 74 struct snd_pcm_substream *substream = data;
75 struct snd_soc_pcm_runtime *rtd = substream->private_data;
76 struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
77 struct snd_pcm_runtime *runtime = substream->runtime;
78 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
79 int ret;
75 80
76 pr_err("DMA timeout on channel %d -%s%s%s%s\n", 81 pr_err("DMA timeout on channel %d -%s%s%s%s\n",
77 channel, 82 channel,
@@ -79,6 +84,14 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
79 err & IMX_DMA_ERR_REQUEST ? " request" : "", 84 err & IMX_DMA_ERR_REQUEST ? " request" : "",
80 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", 85 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
81 err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); 86 err & IMX_DMA_ERR_BUFFER ? " buffer" : "");
87
88 imx_dma_disable(iprtd->dma);
89 ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
90 IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
91 substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
92 DMA_MODE_WRITE : DMA_MODE_READ);
93 if (!ret)
94 imx_dma_enable(iprtd->dma);
82} 95}
83 96
84static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) 97static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index f96a373699cf..6b518e07eea9 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -39,23 +39,24 @@ struct imx_pcm_runtime_data {
39 unsigned long offset; 39 unsigned long offset;
40 unsigned long last_offset; 40 unsigned long last_offset;
41 unsigned long size; 41 unsigned long size;
42 struct timer_list timer; 42 struct hrtimer hrt;
43 int poll_time; 43 int poll_time_ns;
44 struct snd_pcm_substream *substream;
45 atomic_t running;
44}; 46};
45 47
46static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd) 48static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
47{ 49{
48 iprtd->timer.expires = jiffies + iprtd->poll_time; 50 struct imx_pcm_runtime_data *iprtd =
49} 51 container_of(hrt, struct imx_pcm_runtime_data, hrt);
50 52 struct snd_pcm_substream *substream = iprtd->substream;
51static void imx_ssi_timer_callback(unsigned long data)
52{
53 struct snd_pcm_substream *substream = (void *)data;
54 struct snd_pcm_runtime *runtime = substream->runtime; 53 struct snd_pcm_runtime *runtime = substream->runtime;
55 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
56 struct pt_regs regs; 54 struct pt_regs regs;
57 unsigned long delta; 55 unsigned long delta;
58 56
57 if (!atomic_read(&iprtd->running))
58 return HRTIMER_NORESTART;
59
59 get_fiq_regs(&regs); 60 get_fiq_regs(&regs);
60 61
61 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 62 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data)
72 73
73 /* If we've transferred at least a period then report it and 74 /* If we've transferred at least a period then report it and
74 * reset our poll time */ 75 * reset our poll time */
75 if (delta >= runtime->period_size) { 76 if (delta >= iprtd->period) {
76 snd_pcm_period_elapsed(substream); 77 snd_pcm_period_elapsed(substream);
77 iprtd->last_offset = iprtd->offset; 78 iprtd->last_offset = iprtd->offset;
78
79 imx_ssi_set_next_poll(iprtd);
80 } 79 }
81 80
82 /* Restart the timer; if we didn't report we'll run on the next tick */ 81 hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));
83 add_timer(&iprtd->timer);
84 82
83 return HRTIMER_RESTART;
85} 84}
86 85
87static struct fiq_handler fh = { 86static struct fiq_handler fh = {
@@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
99 iprtd->period = params_period_bytes(params) ; 98 iprtd->period = params_period_bytes(params) ;
100 iprtd->offset = 0; 99 iprtd->offset = 0;
101 iprtd->last_offset = 0; 100 iprtd->last_offset = 0;
102 iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params)); 101 iprtd->poll_time_ns = 1000000000 / params_rate(params) *
103 102 params_period_size(params);
104 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); 103 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
105 104
106 return 0; 105 return 0;
@@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
135 case SNDRV_PCM_TRIGGER_START: 134 case SNDRV_PCM_TRIGGER_START:
136 case SNDRV_PCM_TRIGGER_RESUME: 135 case SNDRV_PCM_TRIGGER_RESUME:
137 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 136 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
138 imx_ssi_set_next_poll(iprtd); 137 atomic_set(&iprtd->running, 1);
139 add_timer(&iprtd->timer); 138 hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
139 HRTIMER_MODE_REL);
140 if (++fiq_enable == 1) 140 if (++fiq_enable == 1)
141 enable_fiq(imx_pcm_fiq); 141 enable_fiq(imx_pcm_fiq);
142 142
@@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
145 case SNDRV_PCM_TRIGGER_STOP: 145 case SNDRV_PCM_TRIGGER_STOP:
146 case SNDRV_PCM_TRIGGER_SUSPEND: 146 case SNDRV_PCM_TRIGGER_SUSPEND:
147 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 147 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
148 del_timer(&iprtd->timer); 148 atomic_set(&iprtd->running, 0);
149
149 if (--fiq_enable == 0) 150 if (--fiq_enable == 0)
150 disable_fiq(imx_pcm_fiq); 151 disable_fiq(imx_pcm_fiq);
151 152
152
153 break; 153 break;
154 default: 154 default:
155 return -EINVAL; 155 return -EINVAL;
@@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = {
180 .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, 180 .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
181 .period_bytes_min = 128, 181 .period_bytes_min = 128,
182 .period_bytes_max = 16 * 1024, 182 .period_bytes_max = 16 * 1024,
183 .periods_min = 2, 183 .periods_min = 4,
184 .periods_max = 255, 184 .periods_max = 255,
185 .fifo_size = 0, 185 .fifo_size = 0,
186}; 186};
@@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
194 iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); 194 iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
195 runtime->private_data = iprtd; 195 runtime->private_data = iprtd;
196 196
197 init_timer(&iprtd->timer); 197 iprtd->substream = substream;
198 iprtd->timer.data = (unsigned long)substream; 198
199 iprtd->timer.function = imx_ssi_timer_callback; 199 atomic_set(&iprtd->running, 0);
200 hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
201 iprtd->hrt.function = snd_hrtimer_callback;
200 202
201 ret = snd_pcm_hw_constraint_integer(substream->runtime, 203 ret = snd_pcm_hw_constraint_integer(substream->runtime,
202 SNDRV_PCM_HW_PARAM_PERIODS); 204 SNDRV_PCM_HW_PARAM_PERIODS);
@@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream)
212 struct snd_pcm_runtime *runtime = substream->runtime; 214 struct snd_pcm_runtime *runtime = substream->runtime;
213 struct imx_pcm_runtime_data *iprtd = runtime->private_data; 215 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
214 216
215 del_timer_sync(&iprtd->timer); 217 hrtimer_cancel(&iprtd->hrt);
218
216 kfree(iprtd); 219 kfree(iprtd);
217 220
218 return 0; 221 return 0;
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 0bcc6d7d9471..80b4fee2442b 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -656,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev)
656 dai->private_data = ssi; 656 dai->private_data = ssi;
657 657
658 if ((cpu_is_mx27() || cpu_is_mx21()) && 658 if ((cpu_is_mx27() || cpu_is_mx21()) &&
659 !(ssi->flags & IMX_SSI_USE_AC97)) { 659 !(ssi->flags & IMX_SSI_USE_AC97) &&
660 (ssi->flags & IMX_SSI_DMA)) {
660 ssi->flags |= IMX_SSI_DMA; 661 ssi->flags |= IMX_SSI_DMA;
661 platform = imx_ssi_dma_mx2_init(pdev, ssi); 662 platform = imx_ssi_dma_mx2_init(pdev, ssi);
662 } else 663 } else
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 2c59afd99611..9e28b20cb2ce 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
986 DEFINE_WAIT(wait); 986 DEFINE_WAIT(wait);
987 long timeout = msecs_to_jiffies(50); 987 long timeout = msecs_to_jiffies(50);
988 988
989 if (ep->umidi->disconnected)
990 return;
989 /* 991 /*
990 * The substream buffer is empty, but some data might still be in the 992 * The substream buffer is empty, but some data might still be in the
991 * currently active URBs, so we have to wait for those to complete. 993 * currently active URBs, so we have to wait for those to complete.
@@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
1123 * Frees an output endpoint. 1125 * Frees an output endpoint.
1124 * May be called when ep hasn't been initialized completely. 1126 * May be called when ep hasn't been initialized completely.
1125 */ 1127 */
1126static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) 1128static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
1127{ 1129{
1128 unsigned int i; 1130 unsigned int i;
1129 1131
1130 for (i = 0; i < OUTPUT_URBS; ++i) 1132 for (i = 0; i < OUTPUT_URBS; ++i)
1131 if (ep->urbs[i].urb) 1133 if (ep->urbs[i].urb) {
1132 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, 1134 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
1133 ep->max_transfer); 1135 ep->max_transfer);
1136 ep->urbs[i].urb = NULL;
1137 }
1138}
1139
1140static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
1141{
1142 snd_usbmidi_out_endpoint_clear(ep);
1134 kfree(ep); 1143 kfree(ep);
1135} 1144}
1136 1145
@@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
1262 usb_kill_urb(ep->out->urbs[j].urb); 1271 usb_kill_urb(ep->out->urbs[j].urb);
1263 if (umidi->usb_protocol_ops->finish_out_endpoint) 1272 if (umidi->usb_protocol_ops->finish_out_endpoint)
1264 umidi->usb_protocol_ops->finish_out_endpoint(ep->out); 1273 umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
1274 ep->out->active_urbs = 0;
1275 if (ep->out->drain_urbs) {
1276 ep->out->drain_urbs = 0;
1277 wake_up(&ep->out->drain_wait);
1278 }
1265 } 1279 }
1266 if (ep->in) 1280 if (ep->in)
1267 for (j = 0; j < INPUT_URBS; ++j) 1281 for (j = 0; j < INPUT_URBS; ++j)
1268 usb_kill_urb(ep->in->urbs[j]); 1282 usb_kill_urb(ep->in->urbs[j]);
1269 /* free endpoints here; later call can result in Oops */ 1283 /* free endpoints here; later call can result in Oops */
1270 if (ep->out) { 1284 if (ep->out)
1271 snd_usbmidi_out_endpoint_delete(ep->out); 1285 snd_usbmidi_out_endpoint_clear(ep->out);
1272 ep->out = NULL;
1273 }
1274 if (ep->in) { 1286 if (ep->in) {
1275 snd_usbmidi_in_endpoint_delete(ep->in); 1287 snd_usbmidi_in_endpoint_delete(ep->in);
1276 ep->in = NULL; 1288 ep->in = NULL;